diff options
Diffstat (limited to 'llvm/lib/Target/R600/R600InstrInfo.cpp')
| -rw-r--r-- | llvm/lib/Target/R600/R600InstrInfo.cpp | 190 |
1 file changed, 74 insertions, 116 deletions
diff --git a/llvm/lib/Target/R600/R600InstrInfo.cpp b/llvm/lib/Target/R600/R600InstrInfo.cpp index 018583dd52d..d17425f4c9a 100644 --- a/llvm/lib/Target/R600/R600InstrInfo.cpp +++ b/llvm/lib/Target/R600/R600InstrInfo.cpp @@ -69,7 +69,7 @@ R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, AMDGPU::MOV, DestReg, SrcReg); - NewMI->getOperand(getOperandIdx(*NewMI, R600Operands::SRC0)) + NewMI->getOperand(getOperandIdx(*NewMI, AMDGPU::OpName::src0)) .setIsKill(KillSrc); } } @@ -170,22 +170,24 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const { SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result; if (MI->getOpcode() == AMDGPU::DOT_4) { - static const R600Operands::VecOps OpTable[8][2] = { - {R600Operands::SRC0_X, R600Operands::SRC0_SEL_X}, - {R600Operands::SRC0_Y, R600Operands::SRC0_SEL_Y}, - {R600Operands::SRC0_Z, R600Operands::SRC0_SEL_Z}, - {R600Operands::SRC0_W, R600Operands::SRC0_SEL_W}, - {R600Operands::SRC1_X, R600Operands::SRC1_SEL_X}, - {R600Operands::SRC1_Y, R600Operands::SRC1_SEL_Y}, - {R600Operands::SRC1_Z, R600Operands::SRC1_SEL_Z}, - {R600Operands::SRC1_W, R600Operands::SRC1_SEL_W}, + static const unsigned OpTable[8][2] = { + {AMDGPU::OpName::src0_X, AMDGPU::OpName::src0_sel_X}, + {AMDGPU::OpName::src0_Y, AMDGPU::OpName::src0_sel_Y}, + {AMDGPU::OpName::src0_Z, AMDGPU::OpName::src0_sel_Z}, + {AMDGPU::OpName::src0_W, AMDGPU::OpName::src0_sel_W}, + {AMDGPU::OpName::src1_X, AMDGPU::OpName::src1_sel_X}, + {AMDGPU::OpName::src1_Y, AMDGPU::OpName::src1_sel_Y}, + {AMDGPU::OpName::src1_Z, AMDGPU::OpName::src1_sel_Z}, + {AMDGPU::OpName::src1_W, AMDGPU::OpName::src1_sel_W}, }; for (unsigned j = 0; j < 8; j++) { - MachineOperand &MO = MI->getOperand(OpTable[j][0] + 1); + MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(), + OpTable[j][0])); unsigned Reg = MO.getReg(); if (Reg == AMDGPU::ALU_CONST) { - unsigned Sel = MI->getOperand(OpTable[j][1] + 1).getImm(); + unsigned Sel = 
MI->getOperand(getOperandIdx(MI->getOpcode(), + OpTable[j][1])).getImm(); Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Sel)); continue; } @@ -194,10 +196,10 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const { return Result; } - static const R600Operands::Ops OpTable[3][2] = { - {R600Operands::SRC0, R600Operands::SRC0_SEL}, - {R600Operands::SRC1, R600Operands::SRC1_SEL}, - {R600Operands::SRC2, R600Operands::SRC2_SEL}, + static const unsigned OpTable[3][2] = { + {AMDGPU::OpName::src0, AMDGPU::OpName::src0_sel}, + {AMDGPU::OpName::src1, AMDGPU::OpName::src1_sel}, + {AMDGPU::OpName::src2, AMDGPU::OpName::src2_sel}, }; for (unsigned j = 0; j < 3; j++) { @@ -214,7 +216,7 @@ R600InstrInfo::getSrcs(MachineInstr *MI) const { } if (Reg == AMDGPU::ALU_LITERAL_X) { unsigned Imm = MI->getOperand( - getOperandIdx(MI->getOpcode(), R600Operands::IMM)).getImm(); + getOperandIdx(MI->getOpcode(), AMDGPU::OpName::literal)).getImm(); Result.push_back(std::pair<MachineOperand *, int64_t>(&MO, Imm)); continue; } @@ -329,7 +331,7 @@ R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG, for (unsigned i = 0, e = IG.size(); i < e; ++i) { IGSrcs.push_back(ExtractSrcs(IG[i], PV)); unsigned Op = getOperandIdx(IG[i]->getOpcode(), - R600Operands::BANK_SWIZZLE); + AMDGPU::OpName::bank_swizzle); ValidSwizzle.push_back( (R600InstrInfo::BankSwizzle) IG[i]->getOperand(Op).getImm()); } @@ -812,13 +814,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB, unsigned AddrReg = AMDGPU::R600_AddrRegClass.getRegister(Address); MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg, AMDGPU::AR_X, OffsetReg); - setImmOperand(MOVA, R600Operands::WRITE, 0); + setImmOperand(MOVA, AMDGPU::OpName::write, 0); MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV, AddrReg, ValueReg) .addReg(AMDGPU::AR_X, RegState::Implicit | RegState::Kill); - setImmOperand(Mov, R600Operands::DST_REL, 1); + setImmOperand(Mov, 
AMDGPU::OpName::dst_rel, 1); return Mov; } @@ -830,13 +832,13 @@ MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB, MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, AMDGPU::MOVA_INT_eg, AMDGPU::AR_X, OffsetReg); - setImmOperand(MOVA, R600Operands::WRITE, 0); + setImmOperand(MOVA, AMDGPU::OpName::write, 0); MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, AMDGPU::MOV, ValueReg, AddrReg) .addReg(AMDGPU::AR_X, RegState::Implicit | RegState::Kill); - setImmOperand(Mov, R600Operands::SRC0_REL, 1); + setImmOperand(Mov, AMDGPU::OpName::src0_rel, 1); return Mov; } @@ -892,7 +894,7 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB #define OPERAND_CASE(Label) \ case Label: { \ - static const R600Operands::VecOps Ops[] = \ + static const unsigned Ops[] = \ { \ Label##_X, \ Label##_Y, \ @@ -902,26 +904,25 @@ MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MB return Ops[Slot]; \ } -static R600Operands::VecOps -getSlotedOps(R600Operands::Ops Op, unsigned Slot) { +static unsigned getSlotedOps(unsigned Op, unsigned Slot) { switch (Op) { - OPERAND_CASE(R600Operands::UPDATE_EXEC_MASK) - OPERAND_CASE(R600Operands::UPDATE_PREDICATE) - OPERAND_CASE(R600Operands::WRITE) - OPERAND_CASE(R600Operands::OMOD) - OPERAND_CASE(R600Operands::DST_REL) - OPERAND_CASE(R600Operands::CLAMP) - OPERAND_CASE(R600Operands::SRC0) - OPERAND_CASE(R600Operands::SRC0_NEG) - OPERAND_CASE(R600Operands::SRC0_REL) - OPERAND_CASE(R600Operands::SRC0_ABS) - OPERAND_CASE(R600Operands::SRC0_SEL) - OPERAND_CASE(R600Operands::SRC1) - OPERAND_CASE(R600Operands::SRC1_NEG) - OPERAND_CASE(R600Operands::SRC1_REL) - OPERAND_CASE(R600Operands::SRC1_ABS) - OPERAND_CASE(R600Operands::SRC1_SEL) - OPERAND_CASE(R600Operands::PRED_SEL) + OPERAND_CASE(AMDGPU::OpName::update_exec_mask) + OPERAND_CASE(AMDGPU::OpName::update_pred) + OPERAND_CASE(AMDGPU::OpName::write) + OPERAND_CASE(AMDGPU::OpName::omod) + 
OPERAND_CASE(AMDGPU::OpName::dst_rel) + OPERAND_CASE(AMDGPU::OpName::clamp) + OPERAND_CASE(AMDGPU::OpName::src0) + OPERAND_CASE(AMDGPU::OpName::src0_neg) + OPERAND_CASE(AMDGPU::OpName::src0_rel) + OPERAND_CASE(AMDGPU::OpName::src0_abs) + OPERAND_CASE(AMDGPU::OpName::src0_sel) + OPERAND_CASE(AMDGPU::OpName::src1) + OPERAND_CASE(AMDGPU::OpName::src1_neg) + OPERAND_CASE(AMDGPU::OpName::src1_rel) + OPERAND_CASE(AMDGPU::OpName::src1_abs) + OPERAND_CASE(AMDGPU::OpName::src1_sel) + OPERAND_CASE(AMDGPU::OpName::pred_sel) default: llvm_unreachable("Wrong Operand"); } @@ -929,12 +930,6 @@ getSlotedOps(R600Operands::Ops Op, unsigned Slot) { #undef OPERAND_CASE -static int -getVecOperandIdx(R600Operands::VecOps Op) { - return 1 + Op; -} - - MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction( MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg) const { @@ -947,31 +942,31 @@ MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction( Opcode = AMDGPU::DOT4_eg; MachineBasicBlock::iterator I = MI; MachineOperand &Src0 = MI->getOperand( - getVecOperandIdx(getSlotedOps(R600Operands::SRC0, Slot))); + getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src0, Slot))); MachineOperand &Src1 = MI->getOperand( - getVecOperandIdx(getSlotedOps(R600Operands::SRC1, Slot))); + getOperandIdx(MI->getOpcode(), getSlotedOps(AMDGPU::OpName::src1, Slot))); MachineInstr *MIB = buildDefaultInstruction( MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg()); - static const R600Operands::Ops Operands[14] = { - R600Operands::UPDATE_EXEC_MASK, - R600Operands::UPDATE_PREDICATE, - R600Operands::WRITE, - R600Operands::OMOD, - R600Operands::DST_REL, - R600Operands::CLAMP, - R600Operands::SRC0_NEG, - R600Operands::SRC0_REL, - R600Operands::SRC0_ABS, - R600Operands::SRC0_SEL, - R600Operands::SRC1_NEG, - R600Operands::SRC1_REL, - R600Operands::SRC1_ABS, - R600Operands::SRC1_SEL, + static const unsigned Operands[14] = { + AMDGPU::OpName::update_exec_mask, + 
AMDGPU::OpName::update_pred, + AMDGPU::OpName::write, + AMDGPU::OpName::omod, + AMDGPU::OpName::dst_rel, + AMDGPU::OpName::clamp, + AMDGPU::OpName::src0_neg, + AMDGPU::OpName::src0_rel, + AMDGPU::OpName::src0_abs, + AMDGPU::OpName::src0_sel, + AMDGPU::OpName::src1_neg, + AMDGPU::OpName::src1_rel, + AMDGPU::OpName::src1_abs, + AMDGPU::OpName::src1_sel, }; for (unsigned i = 0; i < 14; i++) { MachineOperand &MO = MI->getOperand( - getVecOperandIdx(getSlotedOps(Operands[i], Slot))); + getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot))); assert (MO.isImm()); setImmOperand(MIB, Operands[i], MO.getImm()); } @@ -985,56 +980,19 @@ MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB, uint64_t Imm) const { MachineInstr *MovImm = buildDefaultInstruction(BB, I, AMDGPU::MOV, DstReg, AMDGPU::ALU_LITERAL_X); - setImmOperand(MovImm, R600Operands::IMM, Imm); + setImmOperand(MovImm, AMDGPU::OpName::literal, Imm); return MovImm; } -int R600InstrInfo::getOperandIdx(const MachineInstr &MI, - R600Operands::Ops Op) const { - return getOperandIdx(MI.getOpcode(), Op); -} - -int R600InstrInfo::getOperandIdx(const MachineInstr &MI, - R600Operands::VecOps Op) const { +int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const { return getOperandIdx(MI.getOpcode(), Op); } -int R600InstrInfo::getOperandIdx(unsigned Opcode, - R600Operands::Ops Op) const { - unsigned TargetFlags = get(Opcode).TSFlags; - unsigned OpTableIdx; - - if (!HAS_NATIVE_OPERANDS(TargetFlags)) { - switch (Op) { - case R600Operands::DST: return 0; - case R600Operands::SRC0: return 1; - case R600Operands::SRC1: return 2; - case R600Operands::SRC2: return 3; - default: - assert(!"Unknown operand type for instruction"); - return -1; - } - } - - if (TargetFlags & R600_InstFlag::OP1) { - OpTableIdx = 0; - } else if (TargetFlags & R600_InstFlag::OP2) { - OpTableIdx = 1; - } else { - assert((TargetFlags & R600_InstFlag::OP3) && "OP1, OP2, or OP3 not defined " - "for this instruction"); - 
OpTableIdx = 2; - } - - return R600Operands::ALUOpTable[OpTableIdx][Op]; -} - -int R600InstrInfo::getOperandIdx(unsigned Opcode, - R600Operands::VecOps Op) const { - return Op + 1; +int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const { + return AMDGPU::getNamedOperandIdx(Opcode, Op); } -void R600InstrInfo::setImmOperand(MachineInstr *MI, R600Operands::Ops Op, +void R600InstrInfo::setImmOperand(MachineInstr *MI, unsigned Op, int64_t Imm) const { int Idx = getOperandIdx(*MI, Op); assert(Idx != -1 && "Operand not supported for this instruction."); @@ -1062,20 +1020,20 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx, bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3; switch (Flag) { case MO_FLAG_CLAMP: - FlagIndex = getOperandIdx(*MI, R600Operands::CLAMP); + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::clamp); break; case MO_FLAG_MASK: - FlagIndex = getOperandIdx(*MI, R600Operands::WRITE); + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::write); break; case MO_FLAG_NOT_LAST: case MO_FLAG_LAST: - FlagIndex = getOperandIdx(*MI, R600Operands::LAST); + FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::last); break; case MO_FLAG_NEG: switch (SrcIdx) { - case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_NEG); break; - case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_NEG); break; - case 2: FlagIndex = getOperandIdx(*MI, R600Operands::SRC2_NEG); break; + case 0: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src0_neg); break; + case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_neg); break; + case 2: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src2_neg); break; } break; @@ -1084,8 +1042,8 @@ MachineOperand &R600InstrInfo::getFlagOp(MachineInstr *MI, unsigned SrcIdx, "instructions."); (void)IsOP3; switch (SrcIdx) { - case 0: FlagIndex = getOperandIdx(*MI, R600Operands::SRC0_ABS); break; - case 1: FlagIndex = getOperandIdx(*MI, R600Operands::SRC1_ABS); break; + case 0: FlagIndex = 
getOperandIdx(*MI, AMDGPU::OpName::src0_abs); break; + case 1: FlagIndex = getOperandIdx(*MI, AMDGPU::OpName::src1_abs); break; } break; |

