diff options
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/R600/R600EmitClauseMarkers.cpp | 71 | ||||
| -rw-r--r-- | llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp | 17 | ||||
| -rw-r--r-- | llvm/lib/Target/R600/R600ISelLowering.cpp | 20 | ||||
| -rw-r--r-- | llvm/lib/Target/R600/R600InstrInfo.cpp | 8 | ||||
| -rw-r--r-- | llvm/lib/Target/R600/R600InstrInfo.h | 2 | ||||
| -rw-r--r-- | llvm/lib/Target/R600/R600MachineScheduler.cpp | 32 | ||||
| -rw-r--r-- | llvm/lib/Target/R600/R600MachineScheduler.h | 2 | ||||
| -rw-r--r-- | llvm/lib/Target/R600/R600RegisterInfo.cpp | 13 | ||||
| -rw-r--r-- | llvm/lib/Target/R600/R600RegisterInfo.h | 2 |
9 files changed, 116 insertions, 51 deletions
diff --git a/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp b/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp index 928c0e3ba6d..1bbfd2b68f3 100644 --- a/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp +++ b/llvm/lib/Target/R600/R600EmitClauseMarkers.cpp @@ -47,6 +47,11 @@ private: break; } + // These will be expanded to two ALU instructions in the + // ExpandSpecialInstructions pass. + if (TII->isLDSRetInstr(MI->getOpcode())) + return 2; + if(TII->isVector(*MI) || TII->isCubeOp(MI->getOpcode()) || TII->isReductionOp(MI->getOpcode())) @@ -106,8 +111,13 @@ private: } bool SubstituteKCacheBank(MachineInstr *MI, - std::vector<std::pair<unsigned, unsigned> > &CachedConsts) const { + std::vector<std::pair<unsigned, unsigned> > &CachedConsts, + bool UpdateInstr = true) const { std::vector<std::pair<unsigned, unsigned> > UsedKCache; + + if (!TII->isALUInstr(MI->getOpcode()) && MI->getOpcode() != AMDGPU::DOT_4) + return true; + const SmallVectorImpl<std::pair<MachineOperand *, int64_t> > &Consts = TII->getSrcs(MI); assert((TII->isALUInstr(MI->getOpcode()) || @@ -140,6 +150,9 @@ private: return false; } + if (!UpdateInstr) + return true; + for (unsigned i = 0, j = 0, n = Consts.size(); i < n; ++i) { if (Consts[i].first->getReg() != AMDGPU::ALU_CONST) continue; @@ -160,6 +173,52 @@ private: return true; } + bool canClauseLocalKillFitInClause( + unsigned AluInstCount, + std::vector<std::pair<unsigned, unsigned> > KCacheBanks, + MachineBasicBlock::iterator Def, + MachineBasicBlock::iterator BBEnd) { + const R600RegisterInfo &TRI = TII->getRegisterInfo(); + for (MachineInstr::const_mop_iterator + MOI = Def->operands_begin(), + MOE = Def->operands_end(); MOI != MOE; ++MOI) { + if (!MOI->isReg() || !MOI->isDef() || + TRI.isPhysRegLiveAcrossClauses(MOI->getReg())) + continue; + + // Def defines a clause local register, so check that its use will fit + // in the clause. 
+ unsigned LastUseCount = 0; + for (MachineBasicBlock::iterator UseI = Def; UseI != BBEnd; ++UseI) { + AluInstCount += OccupiedDwords(UseI); + // Make sure we won't need to end the clause due to KCache limitations. + if (!SubstituteKCacheBank(UseI, KCacheBanks, false)) + return false; + + // We have reached the maximum instruction limit before finding the + // use that kills this register, so we cannot use this def in the + // current clause. + if (AluInstCount >= TII->getMaxAlusPerClause()) + return false; + + // Register kill flags have been cleared by the time we get to this + // pass, but it is safe to assume that all uses of this register + // occur in the same basic block as its definition, because + // it is illegal for the scheduler to schedule them in + // different blocks. + if (UseI->findRegisterUseOperandIdx(MOI->getReg())) + LastUseCount = AluInstCount; + + if (UseI != Def && UseI->findRegisterDefOperandIdx(MOI->getReg()) != -1) + break; + } + if (LastUseCount) + return LastUseCount <= TII->getMaxAlusPerClause(); + llvm_unreachable("Clause local register live at end of clause."); + } + return true; + } + MachineBasicBlock::iterator MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator I) { MachineBasicBlock::iterator ClauseHead = I; @@ -198,11 +257,13 @@ private: I++; break; } - if (TII->isALUInstr(I->getOpcode()) && - !SubstituteKCacheBank(I, KCacheBanks)) + + // If this instruction defines a clause local register, make sure + // its use can fit in this clause. 
+ if (!canClauseLocalKillFitInClause(AluInstCount, KCacheBanks, I, E)) break; - if (I->getOpcode() == AMDGPU::DOT_4 && - !SubstituteKCacheBank(I, KCacheBanks)) + + if (!SubstituteKCacheBank(I, KCacheBanks)) break; AluInstCount += OccupiedDwords(I); } diff --git a/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp b/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp index 67b42d704f7..aeee4aa8956 100644 --- a/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp +++ b/llvm/lib/Target/R600/R600ExpandSpecialInstrs.cpp @@ -68,6 +68,23 @@ bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) { MachineInstr &MI = *I; I = llvm::next(I); + // Expand LDS_*_RET instructions + if (TII->isLDSRetInstr(MI.getOpcode())) { + int DstIdx = TII->getOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst); + assert(DstIdx != -1); + MachineOperand &DstOp = MI.getOperand(DstIdx); + MachineInstr *Mov = TII->buildMovInstr(&MBB, I, + DstOp.getReg(), AMDGPU::OQAP); + DstOp.setReg(AMDGPU::OQAP); + int LDSPredSelIdx = TII->getOperandIdx(MI.getOpcode(), + AMDGPU::OpName::pred_sel); + int MovPredSelIdx = TII->getOperandIdx(Mov->getOpcode(), + AMDGPU::OpName::pred_sel); + // Copy the pred_sel bit + Mov->getOperand(MovPredSelIdx).setReg( + MI.getOperand(LDSPredSelIdx).getReg()); + } + switch (MI.getOpcode()) { default: break; // Expand PRED_X to one of the PRED_SET instructions. diff --git a/llvm/lib/Target/R600/R600ISelLowering.cpp b/llvm/lib/Target/R600/R600ISelLowering.cpp index 4b6c2eb5b15..0fcb488672f 100644 --- a/llvm/lib/Target/R600/R600ISelLowering.cpp +++ b/llvm/lib/Target/R600/R600ISelLowering.cpp @@ -134,21 +134,17 @@ MachineBasicBlock * R600TargetLowering::EmitInstrWithCustomInserter( switch (MI->getOpcode()) { default: - if (TII->isLDSInstr(MI->getOpcode()) && - TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst) != -1) { + // Replace LDS_*_RET instruction that don't have any uses with the + // equivalent LDS_*_NORET instruction. 
+ if (TII->isLDSRetInstr(MI->getOpcode())) { int DstIdx = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst); assert(DstIdx != -1); MachineInstrBuilder NewMI; - if (!MRI.use_empty(MI->getOperand(DstIdx).getReg())) { - NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI->getOpcode()), - AMDGPU::OQAP); - TII->buildDefaultInstruction(*BB, I, AMDGPU::MOV, - MI->getOperand(0).getReg(), - AMDGPU::OQAP); - } else { - NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), - TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode()))); - } + if (!MRI.use_empty(MI->getOperand(DstIdx).getReg())) + return BB; + + NewMI = BuildMI(*BB, I, BB->findDebugLoc(I), + TII->get(AMDGPU::getLDSNoRetOp(MI->getOpcode()))); for (unsigned i = 1, e = MI->getNumOperands(); i < e; ++i) { NewMI.addOperand(MI->getOperand(i)); } diff --git a/llvm/lib/Target/R600/R600InstrInfo.cpp b/llvm/lib/Target/R600/R600InstrInfo.cpp index aff11ce1b34..3987b2d436c 100644 --- a/llvm/lib/Target/R600/R600InstrInfo.cpp +++ b/llvm/lib/Target/R600/R600InstrInfo.cpp @@ -141,6 +141,14 @@ bool R600InstrInfo::isLDSInstr(unsigned Opcode) const { (TargetFlags & R600_InstFlag::LDS_1A2D)); } +bool R600InstrInfo::isLDSNoRetInstr(unsigned Opcode) const { + return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) == -1; +} + +bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const { + return isLDSInstr(Opcode) && getOperandIdx(Opcode, AMDGPU::OpName::dst) != -1; +} + bool R600InstrInfo::canBeConsideredALU(const MachineInstr *MI) const { if (isALUInstr(MI->getOpcode())) return true; diff --git a/llvm/lib/Target/R600/R600InstrInfo.h b/llvm/lib/Target/R600/R600InstrInfo.h index e2996c7a78f..b29b91f8d7e 100644 --- a/llvm/lib/Target/R600/R600InstrInfo.h +++ b/llvm/lib/Target/R600/R600InstrInfo.h @@ -65,6 +65,8 @@ namespace llvm { bool isALUInstr(unsigned Opcode) const; bool hasInstrModifiers(unsigned Opcode) const; bool isLDSInstr(unsigned Opcode) const; + bool isLDSNoRetInstr(unsigned Opcode) const; + bool 
isLDSRetInstr(unsigned Opcode) const; /// \returns true if this \p Opcode represents an ALU instruction or an /// instruction that will be lowered in ExpandSpecialInstrs Pass. diff --git a/llvm/lib/Target/R600/R600MachineScheduler.cpp b/llvm/lib/Target/R600/R600MachineScheduler.cpp index 6c26d9ece40..da2a4d862e7 100644 --- a/llvm/lib/Target/R600/R600MachineScheduler.cpp +++ b/llvm/lib/Target/R600/R600MachineScheduler.cpp @@ -92,15 +92,6 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) { AllowSwitchFromAlu = true; } - - // We want to scheduled AR defs as soon as possible to make sure they aren't - // put in a different ALU clause from their uses. - if (!SU && !UnscheduledARDefs.empty()) { - SU = UnscheduledARDefs[0]; - UnscheduledARDefs.erase(UnscheduledARDefs.begin()); - NextInstKind = IDAlu; - } - if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) || (!AllowSwitchFromAlu && CurInstKind == IDAlu))) { // try to pick ALU @@ -130,15 +121,6 @@ SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) { NextInstKind = IDOther; } - // We want to schedule the AR uses as late as possible to make sure that - // the AR defs have been released. 
- if (!SU && !UnscheduledARUses.empty()) { - SU = UnscheduledARUses[0]; - UnscheduledARUses.erase(UnscheduledARUses.begin()); - NextInstKind = IDAlu; - } - - DEBUG( if (SU) { dbgs() << " ** Pick node **\n"; @@ -217,20 +199,6 @@ void R600SchedStrategy::releaseBottomNode(SUnit *SU) { int IK = getInstKind(SU); - // Check for AR register defines - for (MachineInstr::const_mop_iterator I = SU->getInstr()->operands_begin(), - E = SU->getInstr()->operands_end(); - I != E; ++I) { - if (I->isReg() && I->getReg() == AMDGPU::AR_X) { - if (I->isDef()) { - UnscheduledARDefs.push_back(SU); - } else { - UnscheduledARUses.push_back(SU); - } - return; - } - } - // There is no export clause, we can schedule one as soon as its ready if (IK == IDOther) Available[IDOther].push_back(SU); diff --git a/llvm/lib/Target/R600/R600MachineScheduler.h b/llvm/lib/Target/R600/R600MachineScheduler.h index 0a6f1204a4d..97c8cdec0aa 100644 --- a/llvm/lib/Target/R600/R600MachineScheduler.h +++ b/llvm/lib/Target/R600/R600MachineScheduler.h @@ -53,8 +53,6 @@ class R600SchedStrategy : public MachineSchedStrategy { std::vector<SUnit *> Available[IDLast], Pending[IDLast]; std::vector<SUnit *> AvailableAlus[AluLast]; - std::vector<SUnit *> UnscheduledARDefs; - std::vector<SUnit *> UnscheduledARUses; std::vector<SUnit *> PhysicalRegCopy; InstKind CurInstKind; diff --git a/llvm/lib/Target/R600/R600RegisterInfo.cpp b/llvm/lib/Target/R600/R600RegisterInfo.cpp index fbe333d2038..f3bb88b3eef 100644 --- a/llvm/lib/Target/R600/R600RegisterInfo.cpp +++ b/llvm/lib/Target/R600/R600RegisterInfo.cpp @@ -85,3 +85,16 @@ const RegClassWeight &R600RegisterInfo::getRegClassWeight( const TargetRegisterClass *RC) const { return RCW; } + +bool R600RegisterInfo::isPhysRegLiveAcrossClauses(unsigned Reg) const { + assert(!TargetRegisterInfo::isVirtualRegister(Reg)); + + switch (Reg) { + case AMDGPU::OQAP: + case AMDGPU::OQBP: + case AMDGPU::AR_X: + return false; + default: + return true; + } +} diff --git 
a/llvm/lib/Target/R600/R600RegisterInfo.h b/llvm/lib/Target/R600/R600RegisterInfo.h index 8833ee77e04..c74c49ecdcd 100644 --- a/llvm/lib/Target/R600/R600RegisterInfo.h +++ b/llvm/lib/Target/R600/R600RegisterInfo.h @@ -47,6 +47,8 @@ struct R600RegisterInfo : public AMDGPURegisterInfo { virtual const RegClassWeight &getRegClassWeight(const TargetRegisterClass *RC) const; + // \returns true if \p Reg can be defined in one ALU clause and used in another. + virtual bool isPhysRegLiveAcrossClauses(unsigned Reg) const; }; } // End namespace llvm |

