Diffstat (limited to 'llvm/lib/Target/AMDGPU/R600ISelLowering.cpp')
-rw-r--r--  llvm/lib/Target/AMDGPU/R600ISelLowering.cpp  397
1 file changed, 200 insertions(+), 197 deletions(-)
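
The whole patch is one mechanical migration: EmitInstrWithCustomInserter now takes the MachineInstr to expand by reference instead of by pointer, so every `MI->` becomes `MI.`. Below is a standalone sketch of the motivation, using a toy `Instr` type rather than LLVM's classes: a reference parameter moves the never-null contract into the signature instead of into an assert.

```cpp
// Standalone sketch (toy types, not LLVM's real API): why migrate a
// pointer parameter to a reference parameter.
#include <cassert>
#include <iostream>

struct Instr {
  int Opcode;
};

// Before: pointer parameter -- callers can pass nullptr by mistake,
// so the callee has to re-check the contract at runtime.
int opcodeOf(Instr *I) {
  assert(I && "null instruction");
  return I->Opcode;
}

// After: reference parameter -- the non-null contract is in the type,
// and the body needs no dereference noise.
int opcodeOf(Instr &I) { return I.Opcode; }

int main() {
  Instr MI{42};
  std::cout << opcodeOf(MI) << '\n';  // reference overload
  std::cout << opcodeOf(&MI) << '\n'; // pointer overload still callable
}
```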
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index 4851a02e775..a64ffb2c8a0 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -207,65 +207,63 @@ static inline bool isEOP(MachineBasicBlock::iterator I) {
return std::next(I)->getOpcode() == AMDGPU::RETURN;
}
-MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
- MachineInstr * MI, MachineBasicBlock * BB) const {
+MachineBasicBlock *
+R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
MachineFunction * MF = BB->getParent();
MachineRegisterInfo &MRI = MF->getRegInfo();
- MachineBasicBlock::iterator I = *MI;
+ MachineBasicBlock::iterator I = MI;
const R600InstrInfo *TII = getSubtarget()->getInstrInfo();
- switch (MI->getOpcode()) {
+ switch (MI.getOpcode()) {
default:
    // Replace LDS_*_RET instructions that don't have any uses with the
    // equivalent LDS_*_NORET instruction.
- if (TII->isLDSRetInstr(MI->getOpcode())) {
- int DstIdx = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
+ if (TII->isLDSRetInstr(MI.getOpcode())) {
+ int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
assert(DstIdx != -1);
MachineInstrBuilder NewMI;
// FIXME: getLDSNoRetOp method only handles LDS_1A1D LDS ops. Add
// LDS_1A2D support and remove this special case.
- if (!MRI.use_empty(MI->getOperand(DstIdx).getReg()) ||
- MI->getOpcode() == AMDGPU::LDS_CMPST_RET)
+ if (!MRI.use_empty(MI.getOperand(DstIdx).getReg()) ||
+ MI.getOpcode() == AMDGPU::LDS_CMPST_RET)
return BB;
NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
- TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode())));
- for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) {
- NewMI.addOperand(MI->getOperand(i));
+ TII->get(AMDGPU::getLDSNoRetOp(MI.getOpcode())));
+ for (unsigned i = 1, e = MI.getNumOperands(); i < e; ++i) {
+ NewMI.addOperand(MI.getOperand(i));
}
} else {
return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
}
break;
case AMDGPU::CLAMP_R600: {
- MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
- AMDGPU::MOV,
- MI->getOperand(0).getReg(),
- MI->getOperand(1).getReg());
+ MachineInstr *NewMI = TII->buildDefaultInstruction(
+ *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
+ MI.getOperand(1).getReg());
TII->addFlag(*NewMI, 0, MO_FLAG_CLAMP);
break;
}
case AMDGPU::FABS_R600: {
- MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
- AMDGPU::MOV,
- MI->getOperand(0).getReg(),
- MI->getOperand(1).getReg());
+ MachineInstr *NewMI = TII->buildDefaultInstruction(
+ *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
+ MI.getOperand(1).getReg());
TII->addFlag(*NewMI, 0, MO_FLAG_ABS);
break;
}
case AMDGPU::FNEG_R600: {
- MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, I,
- AMDGPU::MOV,
- MI->getOperand(0).getReg(),
- MI->getOperand(1).getReg());
+ MachineInstr *NewMI = TII->buildDefaultInstruction(
+ *BB, I, AMDGPU::MOV, MI.getOperand(0).getReg(),
+ MI.getOperand(1).getReg());
TII->addFlag(*NewMI, 0, MO_FLAG_NEG);
break;
}
case AMDGPU::MASK_WRITE: {
- unsigned maskedRegister = MI->getOperand(0).getReg();
+ unsigned maskedRegister = MI.getOperand(0).getReg();
assert(TargetRegisterInfo::isVirtualRegister(maskedRegister));
MachineInstr * defInstr = MRI.getVRegDef(maskedRegister);
TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
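
The default case above is a small dead-result peephole: an LDS_*_RET whose destination register has no uses is re-emitted in the non-returning LDS_*_NORET form (LDS_CMPST_RET excepted, per the FIXME). A standalone sketch of that decision follows, with a hypothetical string-keyed table standing in for AMDGPU::getLDSNoRetOp and a plain use count standing in for MachineRegisterInfo::use_empty:

```cpp
// Standalone sketch of the LDS_*_RET -> LDS_*_NORET rewrite (toy types,
// hypothetical opcode names; not LLVM's API).
#include <iostream>
#include <string>
#include <unordered_map>

struct Inst {
  std::string Opcode;
  int DstUses; // number of uses of the destination register
};

std::string lowerLDSRet(const Inst &MI) {
  // Hypothetical RET -> NORET table, mirroring AMDGPU::getLDSNoRetOp.
  static const std::unordered_map<std::string, std::string> NoRet = {
      {"LDS_ADD_RET", "LDS_ADD"}, {"LDS_XOR_RET", "LDS_XOR"}};
  auto It = NoRet.find(MI.Opcode);
  if (It == NoRet.end() || MI.DstUses != 0)
    return MI.Opcode; // result is live (or op unknown): keep the RET form
  return It->second;  // dead result: drop it and use the NORET form
}

int main() {
  std::cout << lowerLDSRet({"LDS_ADD_RET", 0}) << '\n'; // LDS_ADD
  std::cout << lowerLDSRet({"LDS_ADD_RET", 2}) << '\n'; // LDS_ADD_RET
}
```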
@@ -273,56 +271,57 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
}
case AMDGPU::MOV_IMM_F32:
- TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
- MI->getOperand(1).getFPImm()->getValueAPF()
- .bitcastToAPInt().getZExtValue());
+ TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(), MI.getOperand(1)
+ .getFPImm()
+ ->getValueAPF()
+ .bitcastToAPInt()
+ .getZExtValue());
break;
case AMDGPU::MOV_IMM_I32:
- TII->buildMovImm(*BB, I, MI->getOperand(0).getReg(),
- MI->getOperand(1).getImm());
+ TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(),
+ MI.getOperand(1).getImm());
break;
case AMDGPU::MOV_IMM_GLOBAL_ADDR: {
//TODO: Perhaps combine this instruction with the next if possible
- auto MIB = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
- MI->getOperand(0).getReg(),
- AMDGPU::ALU_LITERAL_X);
+ auto MIB = TII->buildDefaultInstruction(
+ *BB, MI, AMDGPU::MOV, MI.getOperand(0).getReg(), AMDGPU::ALU_LITERAL_X);
int Idx = TII->getOperandIdx(*MIB, AMDGPU::OpName::literal);
//TODO: Ugh this is rather ugly
- MIB->getOperand(Idx) = MI->getOperand(1);
+ MIB->getOperand(Idx) = MI.getOperand(1);
break;
}
case AMDGPU::CONST_COPY: {
- MachineInstr *NewMI = TII->buildDefaultInstruction(*BB, MI, AMDGPU::MOV,
- MI->getOperand(0).getReg(), AMDGPU::ALU_CONST);
+ MachineInstr *NewMI = TII->buildDefaultInstruction(
+ *BB, MI, AMDGPU::MOV, MI.getOperand(0).getReg(), AMDGPU::ALU_CONST);
TII->setImmOperand(*NewMI, AMDGPU::OpName::src0_sel,
- MI->getOperand(1).getImm());
+ MI.getOperand(1).getImm());
break;
}
case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addImm(isEOP(I)); // Set End of program bit
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addImm(isEOP(I)); // Set End of program bit
break;
}
case AMDGPU::RAT_STORE_TYPED_eg: {
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addOperand(MI->getOperand(2))
- .addImm(isEOP(I)); // Set End of program bit
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addOperand(MI.getOperand(2))
+ .addImm(isEOP(I)); // Set End of program bit
break;
}
case AMDGPU::TXD: {
unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
- MachineOperand &RID = MI->getOperand(4);
- MachineOperand &SID = MI->getOperand(5);
- unsigned TextureId = MI->getOperand(6).getImm();
+ MachineOperand &RID = MI.getOperand(4);
+ MachineOperand &SID = MI.getOperand(5);
+ unsigned TextureId = MI.getOperand(6).getImm();
unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
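
The RAT_WRITE_CACHELESS_* and RAT_STORE_TYPED_eg cases above fold the end-of-program bit into the store whenever the very next instruction is a RETURN, using the isEOP helper shown at the top of the diff. A standalone sketch of that one-instruction lookahead, with a plain vector standing in for the basic block:

```cpp
// Standalone sketch of the EOP check (toy opcodes, not LLVM iterators).
#include <iostream>
#include <vector>

enum Opcode { RAT_WRITE, RETURN, ALU };

// Mirrors: return std::next(I)->getOpcode() == AMDGPU::RETURN;
bool isEOP(const std::vector<Opcode> &Block, size_t I) {
  return I + 1 < Block.size() && Block[I + 1] == RETURN;
}

int main() {
  std::vector<Opcode> BB = {ALU, RAT_WRITE, RETURN};
  std::cout << "EOP bit on store: " << isEOP(BB, 1) << '\n'; // 1
  std::cout << "EOP bit on ALU:   " << isEOP(BB, 0) << '\n'; // 0
}
```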
@@ -355,75 +354,77 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
CTZ = 0;
break;
}
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
- .addOperand(MI->getOperand(3))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
- .addOperand(MI->getOperand(2))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H),
+ T0)
+ .addOperand(MI.getOperand(3))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V),
+ T1)
+ .addOperand(MI.getOperand(2))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_G))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW)
- .addReg(T0, RegState::Implicit)
- .addReg(T1, RegState::Implicit);
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW)
+ .addReg(T0, RegState::Implicit)
+ .addReg(T1, RegState::Implicit);
break;
}
case AMDGPU::TXD_SHADOW: {
unsigned T0 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
unsigned T1 = MRI.createVirtualRegister(&AMDGPU::R600_Reg128RegClass);
- MachineOperand &RID = MI->getOperand(4);
- MachineOperand &SID = MI->getOperand(5);
- unsigned TextureId = MI->getOperand(6).getImm();
+ MachineOperand &RID = MI.getOperand(4);
+ MachineOperand &SID = MI.getOperand(5);
+ unsigned TextureId = MI.getOperand(6).getImm();
unsigned SrcX = 0, SrcY = 1, SrcZ = 2, SrcW = 3;
unsigned CTX = 1, CTY = 1, CTZ = 1, CTW = 1;
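
The TXD expansion above emits three texture instructions: TEX_SET_GRADIENTS_H into T0, TEX_SET_GRADIENTS_V into T1, then TEX_SAMPLE_G, which names T0 and T1 as implicit uses so later passes cannot reorder or delete the gradient writes the sample depends on. A sketch of the emitted shape, with strings standing in for MachineInstrs:

```cpp
// Standalone sketch of the TXD lowering order (illustrative strings,
// not real MachineInstr printing).
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> Lowered = {
      "T0 = TEX_SET_GRADIENTS_H src3, ...", // horizontal derivatives
      "T1 = TEX_SET_GRADIENTS_V src2, ...", // vertical derivatives
      // Implicit uses give the scheduler a real dependence edge:
      "dst = TEX_SAMPLE_G ..., implicit T0, implicit T1",
  };
  for (const auto &L : Lowered)
    std::cout << L << '\n';
}
```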
@@ -457,99 +458,101 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
break;
}
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H), T0)
- .addOperand(MI->getOperand(3))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW);
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V), T1)
- .addOperand(MI->getOperand(2))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_H),
+ T0)
+ .addOperand(MI.getOperand(3))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SET_GRADIENTS_V),
+ T1)
+ .addOperand(MI.getOperand(2))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::TEX_SAMPLE_C_G))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addImm(SrcX)
- .addImm(SrcY)
- .addImm(SrcZ)
- .addImm(SrcW)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(0)
- .addImm(1)
- .addImm(2)
- .addImm(3)
- .addOperand(RID)
- .addOperand(SID)
- .addImm(CTX)
- .addImm(CTY)
- .addImm(CTZ)
- .addImm(CTW)
- .addReg(T0, RegState::Implicit)
- .addReg(T1, RegState::Implicit);
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addImm(SrcX)
+ .addImm(SrcY)
+ .addImm(SrcZ)
+ .addImm(SrcW)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(0)
+ .addImm(1)
+ .addImm(2)
+ .addImm(3)
+ .addOperand(RID)
+ .addOperand(SID)
+ .addImm(CTX)
+ .addImm(CTY)
+ .addImm(CTZ)
+ .addImm(CTW)
+ .addReg(T0, RegState::Implicit)
+ .addReg(T1, RegState::Implicit);
break;
}
case AMDGPU::BRANCH:
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
- .addOperand(MI->getOperand(0));
- break;
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
+ .addOperand(MI.getOperand(0));
+ break;
case AMDGPU::BRANCH_COND_f32: {
MachineInstr *NewMI =
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
- AMDGPU::PREDICATE_BIT)
- .addOperand(MI->getOperand(1))
- .addImm(OPCODE_IS_NOT_ZERO)
- .addImm(0); // Flags
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
+ AMDGPU::PREDICATE_BIT)
+ .addOperand(MI.getOperand(1))
+ .addImm(OPCODE_IS_NOT_ZERO)
+ .addImm(0); // Flags
TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
- .addOperand(MI->getOperand(0))
- .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+ .addOperand(MI.getOperand(0))
+ .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
break;
}
case AMDGPU::BRANCH_COND_i32: {
MachineInstr *NewMI =
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
- AMDGPU::PREDICATE_BIT)
- .addOperand(MI->getOperand(1))
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::PRED_X),
+ AMDGPU::PREDICATE_BIT)
+ .addOperand(MI.getOperand(1))
.addImm(OPCODE_IS_NOT_ZERO_INT)
.addImm(0); // Flags
TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP_COND))
- .addOperand(MI->getOperand(0))
- .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
+ .addOperand(MI.getOperand(0))
+ .addReg(AMDGPU::PREDICATE_BIT, RegState::Kill);
break;
}
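
The BRANCH_COND_* cases above split a conditional branch into a PRED_X compare that defines PREDICATE_BIT (flagged MO_FLAG_PUSH) and a JUMP_COND that kills it; the only difference between the f32 and i32 variants is the compare opcode. A standalone sketch of that two-instruction shape, with toy strings rather than LLVM builders:

```cpp
// Standalone sketch of the conditional-branch lowering (illustrative
// strings, not LLVM's MachineInstrBuilder).
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> lowerCondBranch(const std::string &Cond,
                                         const std::string &Target,
                                         bool IsFloat) {
  // OPCODE_IS_NOT_ZERO vs OPCODE_IS_NOT_ZERO_INT selects the compare kind.
  const std::string Cmp =
      IsFloat ? "OPCODE_IS_NOT_ZERO" : "OPCODE_IS_NOT_ZERO_INT";
  return {
      "PREDICATE_BIT = PRED_X " + Cond + ", " + Cmp + " (push)",
      "JUMP_COND " + Target + ", PREDICATE_BIT<kill>",
  };
}

int main() {
  for (const auto &L : lowerCondBranch("%cc", "BB1", /*IsFloat=*/true))
    std::cout << L << '\n';
}
```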
@@ -557,7 +560,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
case AMDGPU::R600_ExportSwz: {
    // Instruction is left unmodified if it's not the last one of its type
bool isLastInstructionOfItsType = true;
- unsigned InstExportType = MI->getOperand(1).getImm();
+ unsigned InstExportType = MI.getOperand(1).getImm();
for (MachineBasicBlock::iterator NextExportInst = std::next(I),
EndBlock = BB->end(); NextExportInst != EndBlock;
NextExportInst = std::next(NextExportInst)) {
@@ -574,17 +577,17 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
bool EOP = isEOP(I);
if (!EOP && !isLastInstructionOfItsType)
return BB;
- unsigned CfInst = (MI->getOpcode() == AMDGPU::EG_ExportSwz)? 84 : 40;
- BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()))
- .addOperand(MI->getOperand(0))
- .addOperand(MI->getOperand(1))
- .addOperand(MI->getOperand(2))
- .addOperand(MI->getOperand(3))
- .addOperand(MI->getOperand(4))
- .addOperand(MI->getOperand(5))
- .addOperand(MI->getOperand(6))
- .addImm(CfInst)
- .addImm(EOP);
+ unsigned CfInst = (MI.getOpcode() == AMDGPU::EG_ExportSwz) ? 84 : 40;
+ BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
+ .addOperand(MI.getOperand(0))
+ .addOperand(MI.getOperand(1))
+ .addOperand(MI.getOperand(2))
+ .addOperand(MI.getOperand(3))
+ .addOperand(MI.getOperand(4))
+ .addOperand(MI.getOperand(5))
+ .addOperand(MI.getOperand(6))
+ .addImm(CfInst)
+ .addImm(EOP);
break;
}
case AMDGPU::RETURN: {
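
The export case above only rewrites an export if it is the last of its type in the block or carries EOP; CfInst is 84 for the Evergreen EG_ExportSwz form and 40 for R600_ExportSwz. A standalone sketch of the last-of-its-type scan, with toy Export records instead of MachineInstrs:

```cpp
// Standalone sketch of the "last export of its type" scan (toy data,
// not LLVM's basic-block iterators).
#include <iostream>
#include <vector>

struct Export { unsigned Type; };

bool isLastOfItsType(const std::vector<Export> &Block, size_t I) {
  for (size_t J = I + 1; J < Block.size(); ++J)
    if (Block[J].Type == Block[I].Type)
      return false; // a later export of the same type will be finalized
  return true;
}

int main() {
  std::vector<Export> BB = {{0}, {1}, {0}};
  std::cout << isLastOfItsType(BB, 0) << '\n'; // 0: another type-0 follows
  std::cout << isLastOfItsType(BB, 2) << '\n'; // 1: last type-0 export
}
```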
@@ -598,7 +601,7 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter(
}
}
- MI->eraseFromParent();
+ MI.eraseFromParent();
return BB;
}
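
Every case funnels into the same epilogue: replacements are inserted before the iterator I that denotes the original pseudo-instruction, and only then is the original erased. A standalone sketch of why that ordering keeps the iterator valid, with std::list standing in for a MachineBasicBlock:

```cpp
// Standalone sketch of the insert-then-erase pattern (std::list stands
// in for a MachineBasicBlock; ints stand in for instructions).
#include <iostream>
#include <iterator>
#include <list>

int main() {
  std::list<int> BB = {1, 2, 3};  // 2 is the pseudo-instruction to expand
  auto I = std::next(BB.begin()); // iterator at the old instruction
  BB.insert(I, 20);               // emit replacements *before* it
  BB.insert(I, 21);
  BB.erase(I);                    // MI.eraseFromParent() analogue
  for (int Op : BB)
    std::cout << Op << ' ';       // 1 20 21 3
  std::cout << '\n';
}
```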