Diffstat (limited to 'llvm/lib/CodeGen')
22 files changed, 259 insertions, 285 deletions
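Note: every hunk below applies the same mechanical API change: TargetInstrInfo hooks that used to take a MachineInstr* now take a MachineInstr&, making the never-null contract explicit in the type. A minimal caller-side sketch of the pattern (isWidget and handleWidget are hypothetical, not part of this patch):

    // Before: bool isWidget(const MachineInstr *MI) const;
    // After:  bool isWidget(const MachineInstr &MI) const;
    void visit(const MachineInstr &MI, const TargetInstrInfo *TII) {
      if (TII->isWidget(MI))   // was: TII->isWidget(&MI)
        handleWidget(MI);      // hypothetical
    }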
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index a355e09824e..8d35c57690a 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -636,20 +636,20 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
   // We assume a single instruction only has a spill or reload, not
   // both.
   const MachineMemOperand *MMO;
-  if (TII->isLoadFromStackSlotPostFE(&MI, FI)) {
+  if (TII->isLoadFromStackSlotPostFE(MI, FI)) {
     if (FrameInfo->isSpillSlotObjectIndex(FI)) {
       MMO = *MI.memoperands_begin();
       CommentOS << MMO->getSize() << "-byte Reload\n";
     }
-  } else if (TII->hasLoadFromStackSlot(&MI, MMO, FI)) {
+  } else if (TII->hasLoadFromStackSlot(MI, MMO, FI)) {
     if (FrameInfo->isSpillSlotObjectIndex(FI))
       CommentOS << MMO->getSize() << "-byte Folded Reload\n";
-  } else if (TII->isStoreToStackSlotPostFE(&MI, FI)) {
+  } else if (TII->isStoreToStackSlotPostFE(MI, FI)) {
     if (FrameInfo->isSpillSlotObjectIndex(FI)) {
       MMO = *MI.memoperands_begin();
       CommentOS << MMO->getSize() << "-byte Spill\n";
     }
-  } else if (TII->hasStoreToStackSlot(&MI, MMO, FI)) {
+  } else if (TII->hasStoreToStackSlot(MI, MMO, FI)) {
     if (FrameInfo->isSpillSlotObjectIndex(FI))
       CommentOS << MMO->getSize() << "-byte Folded Spill\n";
   }
diff --git a/llvm/lib/CodeGen/CalcSpillWeights.cpp b/llvm/lib/CodeGen/CalcSpillWeights.cpp
index f24d8eaffbc..dc2d38a95f9 100644
--- a/llvm/lib/CodeGen/CalcSpillWeights.cpp
+++ b/llvm/lib/CodeGen/CalcSpillWeights.cpp
@@ -121,7 +121,7 @@ static bool isRematerializable(const LiveInterval &LI,
       }
     }
 
-    if (!TII.isTriviallyReMaterializable(MI, LIS.getAliasAnalysis()))
+    if (!TII.isTriviallyReMaterializable(*MI, LIS.getAliasAnalysis()))
       return false;
   }
   return true;
 }
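Note: in emitComments the enclosing function already receives a const MachineInstr &MI, so each query simply drops the address-of. A condensed sketch of one arm, reusing the names from the hunk above (TII, FrameInfo, CommentOS):

    int FI;
    if (TII->isLoadFromStackSlotPostFE(MI, FI) &&
        FrameInfo->isSpillSlotObjectIndex(FI))
      CommentOS << (*MI.memoperands_begin())->getSize() << "-byte Reload\n";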
diff --git a/llvm/lib/CodeGen/ExecutionDepsFix.cpp b/llvm/lib/CodeGen/ExecutionDepsFix.cpp
index 0ba9b038007..566b8d507b2 100644
--- a/llvm/lib/CodeGen/ExecutionDepsFix.cpp
+++ b/llvm/lib/CodeGen/ExecutionDepsFix.cpp
@@ -320,7 +320,7 @@ void ExeDepsFix::collapse(DomainValue *dv, unsigned domain) {
   // Collapse all the instructions.
   while (!dv->Instrs.empty())
-    TII->setExecutionDomain(dv->Instrs.pop_back_val(), domain);
+    TII->setExecutionDomain(*dv->Instrs.pop_back_val(), domain);
   dv->setSingleDomain(domain);
 
   // If there are multiple users, give them new, unique DomainValues.
@@ -460,7 +460,7 @@ void ExeDepsFix::visitInstr(MachineInstr *MI) {
     return;
 
   // Update instructions with explicit execution domains.
-  std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(MI);
+  std::pair<uint16_t, uint16_t> DomP = TII->getExecutionDomain(*MI);
   if (DomP.first) {
     if (DomP.second)
       visitSoftInstr(MI, DomP.second);
@@ -508,7 +508,7 @@ void ExeDepsFix::processDefs(MachineInstr *MI, bool Kill) {
   // Break dependence on undef uses. Do this before updating LiveRegs below.
   unsigned OpNum;
-  unsigned Pref = TII->getUndefRegClearance(MI, OpNum, TRI);
+  unsigned Pref = TII->getUndefRegClearance(*MI, OpNum, TRI);
   if (Pref) {
     if (shouldBreakDependence(MI, OpNum, Pref))
       UndefReads.push_back(std::make_pair(MI, OpNum));
@@ -531,9 +531,9 @@ void ExeDepsFix::processDefs(MachineInstr *MI, bool Kill) {
       // Check clearance before partial register updates.
       // Call breakDependence before setting LiveRegs[rx].Def.
-      unsigned Pref = TII->getPartialRegUpdateClearance(MI, i, TRI);
+      unsigned Pref = TII->getPartialRegUpdateClearance(*MI, i, TRI);
       if (Pref && shouldBreakDependence(MI, i, Pref))
-        TII->breakPartialRegDependency(MI, i, TRI);
+        TII->breakPartialRegDependency(*MI, i, TRI);
 
       // How many instructions since rx was last written?
       LiveRegs[rx].Def = CurInstr;
@@ -571,7 +571,7 @@ void ExeDepsFix::processUndefReads(MachineBasicBlock *MBB) {
     if (UndefMI == &I) {
       if (!LiveRegSet.contains(UndefMI->getOperand(OpIdx).getReg()))
-        TII->breakPartialRegDependency(UndefMI, OpIdx, TRI);
+        TII->breakPartialRegDependency(*UndefMI, OpIdx, TRI);
 
       UndefReads.pop_back();
       if (UndefReads.empty())
@@ -645,7 +645,7 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
   // If the collapsed operands force a single domain, propagate the collapse.
   if (isPowerOf2_32(available)) {
     unsigned domain = countTrailingZeros(available);
-    TII->setExecutionDomain(mi, domain);
+    TII->setExecutionDomain(*mi, domain);
     visitHardInstr(mi, domain);
     return;
   }
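Note: where a pass stores MachineInstr* in containers (DomainValue::Instrs here), the pointer stays in the data structure and the dereference moves to the TII call, as in *dv->Instrs.pop_back_val() above. The same shape with a hypothetical worklist:

    SmallVector<MachineInstr *, 8> Worklist; // storing pointers is still fine
    while (!Worklist.empty())
      TII->setExecutionDomain(*Worklist.pop_back_val(), Domain);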
diff --git a/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp b/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
index e7bf143b82e..3ad1fc7e926 100644
--- a/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
+++ b/llvm/lib/CodeGen/ExpandPostRAPseudos.cpp
@@ -192,12 +192,12 @@ bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
        mbbi != mbbe; ++mbbi) {
     for (MachineBasicBlock::iterator mi = mbbi->begin(), me = mbbi->end();
          mi != me;) {
-      MachineInstr *MI = mi;
+      MachineInstr &MI = *mi;
       // Advance iterator here because MI may be erased.
       ++mi;
 
       // Only expand pseudos.
-      if (!MI->isPseudo())
+      if (!MI.isPseudo())
         continue;
 
       // Give targets a chance to expand even standard pseudos.
@@ -207,12 +207,12 @@ bool ExpandPostRA::runOnMachineFunction(MachineFunction &MF) {
       }
 
       // Expand standard pseudos.
-      switch (MI->getOpcode()) {
+      switch (MI.getOpcode()) {
       case TargetOpcode::SUBREG_TO_REG:
-        MadeChange |= LowerSubregToReg(MI);
+        MadeChange |= LowerSubregToReg(&MI);
         break;
       case TargetOpcode::COPY:
-        MadeChange |= LowerCopy(MI);
+        MadeChange |= LowerCopy(&MI);
         break;
       case TargetOpcode::DBG_VALUE:
         continue;
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
index 11df2de216d..03b424313f1 100644
--- a/llvm/lib/CodeGen/ImplicitNullChecks.cpp
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -421,14 +421,14 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
   for (auto MII = NotNullSucc->begin(), MIE = NotNullSucc->end(); MII != MIE;
        ++MII) {
-    MachineInstr *MI = &*MII;
+    MachineInstr &MI = *MII;
     unsigned BaseReg;
     int64_t Offset;
     MachineInstr *Dependency = nullptr;
     if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
-      if (MI->mayLoad() && !MI->isPredicable() && BaseReg == PointerReg &&
-          Offset < PageSize && MI->getDesc().getNumDefs() <= 1 &&
-          HD.isSafeToHoist(MI, Dependency)) {
+      if (MI.mayLoad() && !MI.isPredicable() && BaseReg == PointerReg &&
+          Offset < PageSize && MI.getDesc().getNumDefs() <= 1 &&
+          HD.isSafeToHoist(&MI, Dependency)) {
         auto DependencyOperandIsOk = [&](MachineOperand &MO) {
           assert(!(MO.isReg() && MO.isUse()) &&
@@ -463,13 +463,13 @@ bool ImplicitNullChecks::analyzeBlockForNullChecks(
             all_of(Dependency->operands(), DependencyOperandIsOk);
 
         if (DependencyOperandsAreOk) {
-          NullCheckList.emplace_back(MI, MBP.ConditionDef, &MBB, NotNullSucc,
+          NullCheckList.emplace_back(&MI, MBP.ConditionDef, &MBB, NotNullSucc,
                                      NullSucc, Dependency);
           return true;
         }
       }
 
-    HD.rememberInstruction(MI);
+    HD.rememberInstruction(&MI);
     if (HD.isClobbered())
       return false;
   }
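Note: the loops above replace `MachineInstr *MI = mi;` with a reference bound from the iterator before the iterator is advanced, so the instruction can be erased while the loop keeps a valid iterator. A sketch of the idiom, assuming a hypothetical expandOne() that may delete the instruction it is given:

    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
      MachineInstr &MI = *I;
      ++I;                // advance first: MI may be erased below
      if (MI.isPseudo())
        expandOne(MI);    // hypothetical helper taking MachineInstr&
    }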
diff --git a/llvm/lib/CodeGen/InlineSpiller.cpp b/llvm/lib/CodeGen/InlineSpiller.cpp
index dc55398d18b..58a6e8ed294 100644
--- a/llvm/lib/CodeGen/InlineSpiller.cpp
+++ b/llvm/lib/CodeGen/InlineSpiller.cpp
@@ -236,13 +236,13 @@ Spiller *createInlineSpiller(MachineFunctionPass &pass,
 
 /// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
 /// otherwise return 0.
-static unsigned isFullCopyOf(const MachineInstr *MI, unsigned Reg) {
-  if (!MI->isFullCopy())
+static unsigned isFullCopyOf(const MachineInstr &MI, unsigned Reg) {
+  if (!MI.isFullCopy())
     return 0;
-  if (MI->getOperand(0).getReg() == Reg)
-    return MI->getOperand(1).getReg();
-  if (MI->getOperand(1).getReg() == Reg)
-    return MI->getOperand(0).getReg();
+  if (MI.getOperand(0).getReg() == Reg)
+    return MI.getOperand(1).getReg();
+  if (MI.getOperand(1).getReg() == Reg)
+    return MI.getOperand(0).getReg();
   return 0;
 }
@@ -268,7 +268,7 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
   for (MachineRegisterInfo::reg_instr_nodbg_iterator
        RI = MRI.reg_instr_nodbg_begin(SnipLI.reg),
        E = MRI.reg_instr_nodbg_end(); RI != E; ) {
-    MachineInstr *MI = &*(RI++);
+    MachineInstr &MI = *RI++;
 
     // Allow copies to/from Reg.
     if (isFullCopyOf(MI, Reg))
@@ -284,9 +284,9 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
       continue;
 
     // Allow a single additional instruction.
-    if (UseMI && MI != UseMI)
+    if (UseMI && &MI != UseMI)
       return false;
-    UseMI = MI;
+    UseMI = &MI;
   }
   return true;
 }
@@ -307,14 +307,14 @@ void InlineSpiller::collectRegsToSpill() {
   for (MachineRegisterInfo::reg_instr_iterator
        RI = MRI.reg_instr_begin(Reg), E = MRI.reg_instr_end(); RI != E; ) {
-    MachineInstr *MI = &*(RI++);
+    MachineInstr &MI = *RI++;
     unsigned SnipReg = isFullCopyOf(MI, Reg);
     if (!isSibling(SnipReg))
       continue;
     LiveInterval &SnipLI = LIS.getInterval(SnipReg);
     if (!isSnippet(SnipLI))
       continue;
-    SnippetCopies.insert(MI);
+    SnippetCopies.insert(&MI);
     if (isRegToSpill(SnipReg))
       continue;
     RegsToSpill.push_back(SnipReg);
@@ -426,10 +426,10 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
     for (MachineRegisterInfo::use_instr_nodbg_iterator
          UI = MRI.use_instr_nodbg_begin(Reg), E = MRI.use_instr_nodbg_end();
          UI != E; ) {
-      MachineInstr *MI = &*(UI++);
-      if (!MI->isCopy() && !MI->mayStore())
+      MachineInstr &MI = *UI++;
+      if (!MI.isCopy() && !MI.mayStore())
         continue;
-      SlotIndex Idx = LIS.getInstructionIndex(*MI);
+      SlotIndex Idx = LIS.getInstructionIndex(MI);
       if (LI->getVNInfoAt(Idx) != VNI)
         continue;
@@ -448,12 +448,12 @@ void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
       // Erase spills.
       int FI;
       if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
-        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << *MI);
+        DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
         // eliminateDeadDefs won't normally remove stores, so switch opcode.
-        MI->setDesc(TII.get(TargetOpcode::KILL));
-        DeadDefs.push_back(MI);
+        MI.setDesc(TII.get(TargetOpcode::KILL));
+        DeadDefs.push_back(&MI);
         ++NumSpillsRemoved;
-        if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
+        if (HSpiller.rmFromMergeableSpills(&MI, StackSlot))
           --NumSpills;
       }
     }
@@ -656,10 +656,10 @@ void InlineSpiller::reMaterializeAll() {
 /// If MI is a load or store of StackSlot, it can be removed.
 bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, unsigned Reg) {
   int FI = 0;
-  unsigned InstrReg = TII.isLoadFromStackSlot(MI, FI);
+  unsigned InstrReg = TII.isLoadFromStackSlot(*MI, FI);
   bool IsLoad = InstrReg;
   if (!IsLoad)
-    InstrReg = TII.isStoreToStackSlot(MI, FI);
+    InstrReg = TII.isStoreToStackSlot(*MI, FI);
 
   // We have a stack access. Is it the right register and slot?
   if (InstrReg != Reg || FI != StackSlot)
@@ -765,8 +765,8 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
   MachineInstrSpan MIS(MI);
 
   MachineInstr *FoldMI =
-    LoadMI ? TII.foldMemoryOperand(MI, FoldOps, LoadMI, &LIS)
-           : TII.foldMemoryOperand(MI, FoldOps, StackSlot, &LIS);
+      LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
+             : TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS);
   if (!FoldMI)
     return false;
@@ -793,7 +793,7 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> > Ops,
   }
 
   int FI;
-  if (TII.isStoreToStackSlot(MI, FI) && HSpiller.rmFromMergeableSpills(MI, FI))
+  if (TII.isStoreToStackSlot(*MI, FI) && HSpiller.rmFromMergeableSpills(MI, FI))
     --NumSpills;
   LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
   MI->eraseFromParent();
@@ -913,7 +913,7 @@ void InlineSpiller::spillAroundUses(unsigned Reg) {
       Idx = VNI->def;
 
     // Check for a sibling copy.
-    unsigned SibReg = isFullCopyOf(MI, Reg);
+    unsigned SibReg = isFullCopyOf(*MI, Reg);
     if (SibReg && isSibling(SibReg)) {
       // This may actually be a copy between snippets.
       if (isRegToSpill(SibReg)) {
diff --git a/llvm/lib/CodeGen/LiveRangeEdit.cpp b/llvm/lib/CodeGen/LiveRangeEdit.cpp
index 11135f9b621..20003dded70 100644
--- a/llvm/lib/CodeGen/LiveRangeEdit.cpp
+++ b/llvm/lib/CodeGen/LiveRangeEdit.cpp
@@ -53,7 +53,7 @@ bool LiveRangeEdit::checkRematerializable(VNInfo *VNI,
   AliasAnalysis *aa) {
   assert(DefMI && "Missing instruction");
   ScannedRemattable = true;
-  if (!TII.isTriviallyReMaterializable(DefMI, aa))
+  if (!TII.isTriviallyReMaterializable(*DefMI, aa))
     return false;
   Remattable.insert(VNI);
   return true;
@@ -130,7 +130,7 @@ bool LiveRangeEdit::canRematerializeAt(Remat &RM, VNInfo *OrigVNI,
     DefIdx = LIS.getInstructionIndex(*RM.OrigMI);
 
   // If only cheap remats were requested, bail out early.
-  if (cheapAsAMove && !TII.isAsCheapAsAMove(RM.OrigMI))
+  if (cheapAsAMove && !TII.isAsCheapAsAMove(*RM.OrigMI))
     return false;
 
   // Verify that all used registers are available with the same values.
@@ -147,7 +147,7 @@ SlotIndex LiveRangeEdit::rematerializeAt(MachineBasicBlock &MBB,
                                          const TargetRegisterInfo &tri,
                                          bool Late) {
   assert(RM.OrigMI && "Invalid remat");
-  TII.reMaterialize(MBB, MI, DestReg, 0, RM.OrigMI, tri);
+  TII.reMaterialize(MBB, MI, DestReg, 0, *RM.OrigMI, tri);
   // DestReg of the cloned instruction cannot be Dead. Set isDead of DestReg
   // to false anyway in case the isDead flag of RM.OrigMI's dest register
   // is true.
@@ -205,7 +205,7 @@ bool LiveRangeEdit::foldAsLoad(LiveInterval *LI,
   if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
     return false;
 
-  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI, &LIS);
+  MachineInstr *FoldMI = TII.foldMemoryOperand(*UseMI, Ops, *DefMI, &LIS);
   if (!FoldMI)
     return false;
   DEBUG(dbgs() << "                folded: " << *FoldMI);
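Note: file-local helpers migrate the same way as the TII hooks; isFullCopyOf above now takes const MachineInstr&. A usage sketch over a register's instruction list (visitSibling is hypothetical):

    for (MachineRegisterInfo::reg_instr_iterator RI = MRI.reg_instr_begin(Reg),
                                                 E = MRI.reg_instr_end();
         RI != E;) {
      MachineInstr &MI = *RI++; // post-increment: MI may be erased or moved
      if (unsigned OtherReg = isFullCopyOf(MI, Reg))
        visitSibling(OtherReg);
    }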
diff --git a/llvm/lib/CodeGen/MachineCSE.cpp b/llvm/lib/CodeGen/MachineCSE.cpp
index b8283eaf9e2..1209f73d960 100644
--- a/llvm/lib/CodeGen/MachineCSE.cpp
+++ b/llvm/lib/CodeGen/MachineCSE.cpp
@@ -389,7 +389,7 @@ bool MachineCSE::isProfitableToCSE(unsigned CSReg, unsigned Reg,
   // Heuristics #1: Don't CSE "cheap" computation if the def is not local or in
   // an immediate predecessor. We don't want to increase register pressure and
   // end up causing other computation to be spilled.
-  if (TII->isAsCheapAsAMove(MI)) {
+  if (TII->isAsCheapAsAMove(*MI)) {
     MachineBasicBlock *CSBB = CSMI->getParent();
     MachineBasicBlock *BB = MI->getParent();
     if (CSBB != BB && !CSBB->isSuccessor(BB))
@@ -478,8 +478,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
     // Commute commutable instructions.
     bool Commuted = false;
     if (!FoundCSE && MI->isCommutable()) {
-      MachineInstr *NewMI = TII->commuteInstruction(MI);
-      if (NewMI) {
+      if (MachineInstr *NewMI = TII->commuteInstruction(*MI)) {
         Commuted = true;
         FoundCSE = VNT.count(NewMI);
         if (NewMI != MI) {
@@ -488,7 +487,7 @@ bool MachineCSE::ProcessBlock(MachineBasicBlock *MBB) {
           Changed = true;
         } else if (!FoundCSE)
           // MI was changed but it didn't help, commute it back!
-          (void)TII->commuteInstruction(MI);
+          (void)TII->commuteInstruction(*MI);
       }
     }
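Note: commuteInstruction with NewMI=false commutes in place and returns the same instruction, which is what makes the "commute it back" rollback above work. A condensed sketch of the try/undo shape:

    if (MachineInstr *NewMI = TII->commuteInstruction(*MI)) {
      bool FoundCSE = VNT.count(NewMI);
      if (!FoundCSE && NewMI == MI)
        (void)TII->commuteInstruction(*MI); // undo the in-place commute
    }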
diff --git a/llvm/lib/CodeGen/MachineLICM.cpp b/llvm/lib/CodeGen/MachineLICM.cpp
index a6f1c7519c6..e2cedfb3c4b 100644
--- a/llvm/lib/CodeGen/MachineLICM.cpp
+++ b/llvm/lib/CodeGen/MachineLICM.cpp
@@ -428,7 +428,7 @@ void MachineLICM::ProcessMI(MachineInstr *MI,
   if (Def && !RuledOut) {
     int FI = INT_MIN;
     if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
-        (TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
+        (TII->isLoadFromStackSlot(*MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
       Candidates.push_back(CandidateInfo(MI, Def, FI));
   }
 }
@@ -982,7 +982,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
       if (MOReg != Reg)
         continue;
 
-      if (TII->hasHighOperandLatency(SchedModel, MRI, &MI, DefIdx, &UseMI, i))
+      if (TII->hasHighOperandLatency(SchedModel, MRI, MI, DefIdx, UseMI, i))
         return true;
     }
 
@@ -996,7 +996,7 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
 /// Return true if the instruction is marked "cheap" or the operand latency
 /// between its def and a use is one or less.
 bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
-  if (TII->isAsCheapAsAMove(&MI) || MI.isCopyLike())
+  if (TII->isAsCheapAsAMove(MI) || MI.isCopyLike())
     return true;
 
   bool isCheap = false;
@@ -1010,7 +1010,7 @@ bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
     if (TargetRegisterInfo::isPhysicalRegister(Reg))
       continue;
 
-    if (!TII->hasLowDefLatency(SchedModel, &MI, i))
+    if (!TII->hasLowDefLatency(SchedModel, MI, i))
       return false;
     isCheap = true;
   }
@@ -1086,7 +1086,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
   // Rematerializable instructions should always be hoisted since the register
   // allocator can just pull them down again when needed.
-  if (TII->isTriviallyReMaterializable(&MI, AA))
+  if (TII->isTriviallyReMaterializable(MI, AA))
     return true;
 
   // FIXME: If there are long latency loop-invariant instructions inside the
@@ -1139,8 +1139,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
   // High register pressure situation, only hoist if the instruction is going
   // to be remat'ed.
-  if (!TII->isTriviallyReMaterializable(&MI, AA) &&
-      !MI.isInvariantLoad(AA)) {
+  if (!TII->isTriviallyReMaterializable(MI, AA) && !MI.isInvariantLoad(AA)) {
     DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
     return false;
   }
@@ -1177,10 +1176,9 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
   unsigned Reg = MRI->createVirtualRegister(RC);
 
   SmallVector<MachineInstr *, 2> NewMIs;
-  bool Success =
-    TII->unfoldMemoryOperand(MF, MI, Reg,
-                             /*UnfoldLoad=*/true, /*UnfoldStore=*/false,
-                             NewMIs);
+  bool Success = TII->unfoldMemoryOperand(MF, *MI, Reg,
+                                          /*UnfoldLoad=*/true,
+                                          /*UnfoldStore=*/false, NewMIs);
   (void)Success;
   assert(Success &&
          "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
@@ -1221,7 +1219,7 @@ const MachineInstr*
 MachineLICM::LookForDuplicate(const MachineInstr *MI,
                               std::vector<const MachineInstr*> &PrevMIs) {
   for (const MachineInstr *PrevMI : PrevMIs)
-    if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : nullptr)))
+    if (TII->produceSameValue(*MI, *PrevMI, (PreRegAlloc ? MRI : nullptr)))
       return PrevMI;
 
   return nullptr;
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index d8b04202c70..a0ac320b977 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -406,7 +406,7 @@ static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                             MachineBasicBlock *MBB,
                             MachineFunction *MF,
                             const TargetInstrInfo *TII) {
-  return MI->isCall() || TII->isSchedulingBoundary(MI, MBB, *MF);
+  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
 }
 
 /// Main driver for both MachineScheduler and PostMachineScheduler.
@@ -1402,7 +1402,7 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
     SUnit *SU = MemOps[Idx];
     unsigned BaseReg;
     int64_t Offset;
-    if (TII->getMemOpBaseRegImmOfs(SU->getInstr(), BaseReg, Offset, TRI))
+    if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
       MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
   }
   if (MemOpRecords.size() < 2)
@@ -1418,8 +1418,9 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
     SUnit *SUa = MemOpRecords[Idx].SU;
     SUnit *SUb = MemOpRecords[Idx+1].SU;
-    if (TII->shouldClusterMemOps(SUa->getInstr(), SUb->getInstr(), ClusterLength)
-        && DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
+    if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
+                                 ClusterLength) &&
+        DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
       DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
             << SUb->NodeNum << ")\n");
       // Copy successor edges from SUa to SUb. Interleaving computation
@@ -1529,7 +1530,7 @@ void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
     if (!HasDataDep(TRI, *Branch, *Pred))
       continue;
 
-    if (!TII.shouldScheduleAdjacent(Pred, Branch))
+    if (!TII.shouldScheduleAdjacent(*Pred, *Branch))
      continue;
 
    // Create a single weak edge from SU to ExitSU. The only effect is to cause
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index 13924edbb94..a76a4fe7821 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -366,7 +366,7 @@ bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
   if (!CEBCandidates.insert(std::make_pair(From, To)).second)
     return true;
 
-  if (!MI->isCopy() && !TII->isAsCheapAsAMove(MI))
+  if (!MI->isCopy() && !TII->isAsCheapAsAMove(*MI))
     return true;
 
   // MI is cheap, we probably don't want to break the critical edge for it.
@@ -700,7 +700,7 @@ static bool SinkingPreventsImplicitNullCheck(MachineInstr *MI,
   unsigned BaseReg;
   int64_t Offset;
-  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
+  if (!TII->getMemOpBaseRegImmOfs(*MI, BaseReg, Offset, TRI))
     return false;
 
   if (!(MI->mayLoad() && !MI->isPredicable()))
diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp
index f5c349b59ed..92e7d0ed554 100644
--- a/llvm/lib/CodeGen/MachineVerifier.cpp
+++ b/llvm/lib/CodeGen/MachineVerifier.cpp
@@ -880,7 +880,7 @@ void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
   }
 
   StringRef ErrorInfo;
-  if (!TII->verifyInstruction(MI, ErrorInfo))
+  if (!TII->verifyInstruction(*MI, ErrorInfo))
     report(ErrorInfo.data(), MI);
 }
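Note: the memory-operand introspection hooks (getMemOpBaseRegImmOfs, shouldClusterMemOps) take references too, so scheduler code dereferences SUnit::getInstr() at each call. A condensed sketch of the clustering step above:

    if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
                                 ClusterLength) &&
        DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
      ++ClusterLength; // condensed from clusterNeighboringMemOps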
diff --git a/llvm/lib/CodeGen/PeepholeOptimizer.cpp b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
index 35561506b8d..d521ae8f958 100644
--- a/llvm/lib/CodeGen/PeepholeOptimizer.cpp
+++ b/llvm/lib/CodeGen/PeepholeOptimizer.cpp
@@ -564,13 +564,13 @@ bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
   // physical register, we can try to optimize it.
   unsigned SrcReg, SrcReg2;
   int CmpMask, CmpValue;
-  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
+  if (!TII->analyzeCompare(*MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
       TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
       (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
     return false;
 
   // Attempt to optimize the comparison instruction.
-  if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
+  if (TII->optimizeCompareInstr(*MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
     ++NumCmps;
     return true;
   }
@@ -585,11 +585,11 @@ bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI,
   unsigned FalseOp = 0;
   bool Optimizable = false;
   SmallVector<MachineOperand, 4> Cond;
-  if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
+  if (TII->analyzeSelect(*MI, Cond, TrueOp, FalseOp, Optimizable))
     return false;
   if (!Optimizable)
     return false;
-  if (!TII->optimizeSelect(MI, LocalMIs))
+  if (!TII->optimizeSelect(*MI, LocalMIs))
     return false;
   MI->eraseFromParent();
   ++NumSelects;
@@ -599,7 +599,7 @@
 /// \brief Check if a simpler conditional branch can be
 // generated
 bool PeepholeOptimizer::optimizeCondBranch(MachineInstr *MI) {
-  return TII->optimizeCondBranch(MI);
+  return TII->optimizeCondBranch(*MI);
 }
 
 /// \brief Try to find the next source that share the same register file
@@ -1351,7 +1351,7 @@ bool PeepholeOptimizer::foldImmediate(
       continue;
     DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
     assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
-    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
+    if (TII->FoldImmediate(*MI, *II->second, Reg, MRI)) {
       ++NumImmFold;
       return true;
     }
@@ -1636,10 +1636,8 @@ bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
         // we need it for markUsesInDebugValueAsUndef().
         unsigned FoldedReg = FoldAsLoadDefReg;
         MachineInstr *DefMI = nullptr;
-        MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
-                                                      FoldAsLoadDefReg,
-                                                      DefMI);
-        if (FoldMI) {
+        if (MachineInstr *FoldMI =
+                TII->optimizeLoadInstr(*MI, MRI, FoldAsLoadDefReg, DefMI)) {
          // Update LocalMIs since we replaced MI with FoldMI and deleted
          // DefMI.
          DEBUG(dbgs() << "Replacing: " << *MI);
diff --git a/llvm/lib/CodeGen/PostRASchedulerList.cpp b/llvm/lib/CodeGen/PostRASchedulerList.cpp
index 496c2cc9e05..c4d20e46ed3 100644
--- a/llvm/lib/CodeGen/PostRASchedulerList.cpp
+++ b/llvm/lib/CodeGen/PostRASchedulerList.cpp
@@ -340,7 +340,7 @@ bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
       // Calls are not scheduling boundaries before register allocation, but
       // post-ra we don't gain anything by scheduling across calls since we
       // don't need to worry about register pressure.
-      if (MI->isCall() || TII->isSchedulingBoundary(MI, &MBB, Fn)) {
+      if (MI->isCall() || TII->isSchedulingBoundary(*MI, &MBB, Fn)) {
         Scheduler.enterRegion(&MBB, I, Current, CurrentCount - Count);
         Scheduler.setEndIndex(CurrentCount);
         Scheduler.schedule();
diff --git a/llvm/lib/CodeGen/PrologEpilogInserter.cpp b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
index ced77881f13..fb335ee79f6 100644
--- a/llvm/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/llvm/lib/CodeGen/PrologEpilogInserter.cpp
@@ -1056,7 +1056,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
     if (I->getOpcode() == FrameSetupOpcode ||
         I->getOpcode() == FrameDestroyOpcode) {
       InsideCallSequence = (I->getOpcode() == FrameSetupOpcode);
-      SPAdj += TII.getSPAdjust(I);
+      SPAdj += TII.getSPAdjust(*I);
 
       I = TFI->eliminateCallFramePseudoInstr(Fn, *BB, I);
       continue;
@@ -1135,7 +1135,7 @@ void PEI::replaceFrameIndices(MachineBasicBlock *BB, MachineFunction &Fn,
     // if I itself referred to a frame index, we shouldn't count its own
     // adjustment.
     if (MI && InsideCallSequence)
-      SPAdj += TII.getSPAdjust(MI);
+      SPAdj += TII.getSPAdjust(*MI);
 
     if (DoIncr && I != BB->end()) ++I;
diff --git a/llvm/lib/CodeGen/RegisterCoalescer.cpp b/llvm/lib/CodeGen/RegisterCoalescer.cpp
index a30dde8bd9b..fc304e2ccf2 100644
--- a/llvm/lib/CodeGen/RegisterCoalescer.cpp
+++ b/llvm/lib/CodeGen/RegisterCoalescer.cpp
@@ -684,7 +684,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
   // operands then all possible variants (i.e. op#1<->op#2, op#1<->op#3,
   // op#2<->op#3) of commute transformation should be considered/tried here.
   unsigned NewDstIdx = TargetInstrInfo::CommuteAnyOperandIndex;
-  if (!TII->findCommutedOpIndices(DefMI, UseOpIdx, NewDstIdx))
+  if (!TII->findCommutedOpIndices(*DefMI, UseOpIdx, NewDstIdx))
     return false;
 
   MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
@@ -718,7 +718,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP,
   // transformation. Start by commuting the instruction.
   MachineBasicBlock *MBB = DefMI->getParent();
   MachineInstr *NewMI =
-      TII->commuteInstruction(DefMI, false, UseOpIdx, NewDstIdx);
+      TII->commuteInstruction(*DefMI, false, UseOpIdx, NewDstIdx);
   if (!NewMI)
     return false;
   if (TargetRegisterInfo::isVirtualRegister(IntA.reg) &&
@@ -901,9 +901,9 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
     IsDefCopy = true;
     return false;
   }
-  if (!TII->isAsCheapAsAMove(DefMI))
+  if (!TII->isAsCheapAsAMove(*DefMI))
     return false;
-  if (!TII->isTriviallyReMaterializable(DefMI, AA))
+  if (!TII->isTriviallyReMaterializable(*DefMI, AA))
     return false;
   if (!definesFullReg(*DefMI, SrcReg))
     return false;
@@ -953,7 +953,7 @@ bool RegisterCoalescer::reMaterializeTrivialDef(const CoalescerPair &CP,
   MachineBasicBlock *MBB = CopyMI->getParent();
   MachineBasicBlock::iterator MII =
     std::next(MachineBasicBlock::iterator(CopyMI));
-  TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, DefMI, *TRI);
+  TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, *DefMI, *TRI);
   MachineInstr *NewMI = std::prev(MII);
   NewMI->setDebugLoc(DL);
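Note: optimizeCmpInstr threads the reference end to end: analyzeCompare decomposes the compare into its source registers and immediate, then optimizeCompareInstr tries to rewrite or delete it. A condensed sketch (the virtual-register checks from the hunk above are elided):

    unsigned SrcReg, SrcReg2;
    int CmpMask, CmpValue;
    if (TII->analyzeCompare(*MI, SrcReg, SrcReg2, CmpMask, CmpValue) &&
        TII->optimizeCompareInstr(*MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI))
      ++NumCmps;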
diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index 0fe63a949de..33b2f8c30c9 100644
--- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -569,7 +569,7 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
          "Dependency checked between two loads");
 
   // Let the target decide if memory accesses cannot possibly overlap.
-  if (TII->areMemAccessesTriviallyDisjoint(MIa, MIb, AA))
+  if (TII->areMemAccessesTriviallyDisjoint(*MIa, *MIb, AA))
     return false;
 
   // To this point analysis is generic. From here on we do need AA.
diff --git a/llvm/lib/CodeGen/StackSlotColoring.cpp b/llvm/lib/CodeGen/StackSlotColoring.cpp
index 0ba90f343d6..a6087aa85d0 100644
--- a/llvm/lib/CodeGen/StackSlotColoring.cpp
+++ b/llvm/lib/CodeGen/StackSlotColoring.cpp
@@ -386,8 +386,7 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
       break;
 
     int FirstSS, SecondSS;
-    if (TII->isStackSlotCopy(I, FirstSS, SecondSS) &&
-        FirstSS == SecondSS &&
+    if (TII->isStackSlotCopy(*I, FirstSS, SecondSS) && FirstSS == SecondSS &&
         FirstSS != -1) {
       ++NumDead;
       changed = true;
@@ -400,8 +399,10 @@ bool StackSlotColoring::RemoveDeadStores(MachineBasicBlock* MBB) {
 
     unsigned LoadReg = 0;
    unsigned StoreReg = 0;
-    if (!(LoadReg = TII->isLoadFromStackSlot(I, FirstSS))) continue;
-    if (!(StoreReg = TII->isStoreToStackSlot(NextMI, SecondSS))) continue;
+    if (!(LoadReg = TII->isLoadFromStackSlot(*I, FirstSS)))
+      continue;
+    if (!(StoreReg = TII->isStoreToStackSlot(*NextMI, SecondSS)))
+      continue;
     if (FirstSS != SecondSS || LoadReg != StoreReg || FirstSS == -1) continue;
 
     ++NumDead;
diff --git a/llvm/lib/CodeGen/TailDuplicator.cpp b/llvm/lib/CodeGen/TailDuplicator.cpp
index e7628c760b5..05421ac64a4 100644
--- a/llvm/lib/CodeGen/TailDuplicator.cpp
+++ b/llvm/lib/CodeGen/TailDuplicator.cpp
@@ -341,7 +341,7 @@ void TailDuplicator::duplicateInstruction(
     MachineFunction &MF,
     DenseMap<unsigned, RegSubRegPair> &LocalVRMap,
     const DenseSet<unsigned> &UsedByPhi) {
-  MachineInstr *NewMI = TII->duplicate(MI, MF);
+  MachineInstr *NewMI = TII->duplicate(*MI, MF);
   if (PreRegAlloc) {
     for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
       MachineOperand &MO = NewMI->getOperand(i);
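Note: isLoadFromStackSlot/isStoreToStackSlot return the transferred register (or 0), so the RemoveDeadStores pairing above reads naturally with references. A condensed sketch of the reload-then-spill-to-the-same-slot test:

    int FirstSS, SecondSS;
    unsigned LoadReg = TII->isLoadFromStackSlot(*I, FirstSS);
    unsigned StoreReg =
        LoadReg ? TII->isStoreToStackSlot(*NextMI, SecondSS) : 0;
    if (StoreReg && LoadReg == StoreReg && FirstSS == SecondSS &&
        FirstSS != -1)
      NextMI->eraseFromParent(); // the spill writes back what was just loaded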
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 6d90f9dd819..4500e890beb 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -119,13 +119,12 @@ TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
   MBB->addSuccessor(NewDest);
 }
 
-MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
-                                                      bool NewMI,
-                                                      unsigned Idx1,
+MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
+                                                      bool NewMI, unsigned Idx1,
                                                       unsigned Idx2) const {
-  const MCInstrDesc &MCID = MI->getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
   bool HasDef = MCID.getNumDefs();
-  if (HasDef && !MI->getOperand(0).isReg())
+  if (HasDef && !MI.getOperand(0).isReg())
     // No idea how to commute this instruction. Target should implement its own.
     return nullptr;
 
@@ -134,60 +133,62 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
   assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
          CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
          "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
-  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
+  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
          "This only knows how to commute register operands so far");
 
-  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
-  unsigned Reg1 = MI->getOperand(Idx1).getReg();
-  unsigned Reg2 = MI->getOperand(Idx2).getReg();
-  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
-  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
-  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
-  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
-  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
-  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
-  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
-  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
-  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
+  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
+  unsigned Reg1 = MI.getOperand(Idx1).getReg();
+  unsigned Reg2 = MI.getOperand(Idx2).getReg();
+  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
+  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
+  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
+  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
+  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
+  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
+  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
+  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
+  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
   // If destination is tied to either of the commuted source register, then
   // it must be updated.
   if (HasDef && Reg0 == Reg1 &&
-      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
+      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
     Reg2IsKill = false;
     Reg0 = Reg2;
     SubReg0 = SubReg2;
   } else if (HasDef && Reg0 == Reg2 &&
-             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
+             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
     Reg1IsKill = false;
     Reg0 = Reg1;
     SubReg0 = SubReg1;
   }
 
+  MachineInstr *CommutedMI = nullptr;
   if (NewMI) {
     // Create a new instruction.
-    MachineFunction &MF = *MI->getParent()->getParent();
-    MI = MF.CloneMachineInstr(MI);
+    MachineFunction &MF = *MI.getParent()->getParent();
+    CommutedMI = MF.CloneMachineInstr(&MI);
+  } else {
+    CommutedMI = &MI;
   }
 
   if (HasDef) {
-    MI->getOperand(0).setReg(Reg0);
-    MI->getOperand(0).setSubReg(SubReg0);
+    CommutedMI->getOperand(0).setReg(Reg0);
+    CommutedMI->getOperand(0).setSubReg(SubReg0);
   }
-  MI->getOperand(Idx2).setReg(Reg1);
-  MI->getOperand(Idx1).setReg(Reg2);
-  MI->getOperand(Idx2).setSubReg(SubReg1);
-  MI->getOperand(Idx1).setSubReg(SubReg2);
-  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
-  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
-  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
-  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
-  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
-  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
-  return MI;
+  CommutedMI->getOperand(Idx2).setReg(Reg1);
+  CommutedMI->getOperand(Idx1).setReg(Reg2);
+  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
+  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
+  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
+  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
+  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
+  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
+  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
+  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
+  return CommutedMI;
 }
 
-MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
-                                                  bool NewMI,
+MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
   // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
@@ -195,7 +196,7 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
   // called below.
   if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
       !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
-    assert(MI->isCommutable() &&
+    assert(MI.isCommutable() &&
            "Precondition violation: MI must be commutable.");
     return nullptr;
   }
@@ -233,13 +234,13 @@ bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
   return true;
 }
 
-bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
+bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                             unsigned &SrcOpIdx1,
                                             unsigned &SrcOpIdx2) const {
-  assert(!MI->isBundle() &&
+  assert(!MI.isBundle() &&
          "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
 
-  const MCInstrDesc &MCID = MI->getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
   if (!MCID.isCommutable())
     return false;
 
@@ -251,8 +252,7 @@ bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                             CommutableOpIdx1, CommutableOpIdx2))
     return false;
 
-  if (!MI->getOperand(SrcOpIdx1).isReg() ||
-      !MI->getOperand(SrcOpIdx2).isReg())
+  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
     // No idea.
     return false;
   return true;
@@ -299,13 +299,12 @@ bool TargetInstrInfo::PredicateInstruction(
   return MadeChange;
 }
 
-bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
+bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
-  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
-         oe = MI->memoperands_end();
-       o != oe;
-       ++o) {
+  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
+                                  oe = MI.memoperands_end();
+       o != oe; ++o) {
     if ((*o)->isLoad()) {
       if (const FixedStackPseudoSourceValue *Value =
           dyn_cast_or_null<FixedStackPseudoSourceValue>(
@@ -319,13 +318,12 @@ bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
   return false;
 }
 
-bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
+bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
-  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
-         oe = MI->memoperands_end();
-       o != oe;
-       ++o) {
+  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
+                                  oe = MI.memoperands_end();
+       o != oe; ++o) {
     if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast_or_null<FixedStackPseudoSourceValue>(
@@ -372,40 +370,37 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
 
 void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
-                                    unsigned DestReg,
-                                    unsigned SubIdx,
-                                    const MachineInstr *Orig,
+                                    unsigned DestReg, unsigned SubIdx,
+                                    const MachineInstr &Orig,
                                     const TargetRegisterInfo &TRI) const {
-  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
+  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
   MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
   MBB.insert(I, MI);
 }
 
-bool
-TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
-                                  const MachineInstr *MI1,
-                                  const MachineRegisterInfo *MRI) const {
-  return MI0->isIdenticalTo(*MI1, MachineInstr::IgnoreVRegDefs);
+bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
+                                       const MachineInstr &MI1,
+                                       const MachineRegisterInfo *MRI) const {
+  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
 }
 
-MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
+MachineInstr *TargetInstrInfo::duplicate(MachineInstr &Orig,
                                          MachineFunction &MF) const {
-  assert(!Orig->isNotDuplicable() &&
-         "Instruction cannot be duplicated");
-  return MF.CloneMachineInstr(Orig);
+  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
+  return MF.CloneMachineInstr(&Orig);
 }
 
 // If the COPY instruction in MI can be folded to a stack operation, return
 // the register class to use.
-static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
+static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                               unsigned FoldIdx) {
-  assert(MI->isCopy() && "MI must be a COPY instruction");
-  if (MI->getNumOperands() != 2)
+  assert(MI.isCopy() && "MI must be a COPY instruction");
+  if (MI.getNumOperands() != 2)
     return nullptr;
   assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
 
-  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
-  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
+  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
+  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
 
   if (FoldOp.getSubReg() || LiveOp.getSubReg())
     return nullptr;
@@ -416,7 +411,7 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
   assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
          "Cannot fold physregs");
 
-  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
   const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
 
   if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
@@ -433,17 +428,17 @@ void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
   llvm_unreachable("Not a MachO target");
 }
 
-static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
+static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                     ArrayRef<unsigned> Ops, int FrameIndex,
                                     const TargetInstrInfo &TII) {
   unsigned StartIdx = 0;
-  switch (MI->getOpcode()) {
+  switch (MI.getOpcode()) {
   case TargetOpcode::STACKMAP:
     StartIdx = 2; // Skip ID, nShadowBytes.
     break;
   case TargetOpcode::PATCHPOINT: {
     // For PatchPoint, the call args are not foldable.
-    PatchPointOpers opers(MI);
+    PatchPointOpers opers(&MI);
     StartIdx = opers.getVarIdx();
     break;
   }
@@ -459,15 +454,15 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
   }
 
   MachineInstr *NewMI =
-    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
+      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
   MachineInstrBuilder MIB(MF, NewMI);
 
   // No need to fold return, the meta data, and function arguments
   for (unsigned i = 0; i < StartIdx; ++i)
-    MIB.addOperand(MI->getOperand(i));
+    MIB.addOperand(MI.getOperand(i));
 
-  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
-    MachineOperand &MO = MI->getOperand(i);
+  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
+    MachineOperand &MO = MI.getOperand(i);
     if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
       unsigned SpillSize;
       unsigned SpillOffset;
@@ -495,25 +490,24 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
 /// operand folded, otherwise NULL is returned. The client is responsible for
 /// removing the old instruction and adding the new one in the instruction
 /// stream.
-MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                                 ArrayRef<unsigned> Ops,
-                                                 int FI,
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
+                                                 ArrayRef<unsigned> Ops, int FI,
                                                  LiveIntervals *LIS) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    if (MI->getOperand(Ops[i]).isDef())
+    if (MI.getOperand(Ops[i]).isDef())
       Flags |= MachineMemOperand::MOStore;
     else
       Flags |= MachineMemOperand::MOLoad;
 
-  MachineBasicBlock *MBB = MI->getParent();
+  MachineBasicBlock *MBB = MI.getParent();
   assert(MBB && "foldMemoryOperand needs an inserted instruction");
   MachineFunction &MF = *MBB->getParent();
 
   MachineInstr *NewMI = nullptr;
 
-  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
-      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
+      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
     if (NewMI)
@@ -524,7 +518,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   }
 
   if (NewMI) {
-    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
     // Add a memory operand, foldMemoryOperandImpl doesn't do that.
     assert((!(Flags & MachineMemOperand::MOStore) ||
             NewMI->mayStore()) &&
@@ -543,14 +537,14 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   }
 
   // Straight COPY may fold as load/store.
-  if (!MI->isCopy() || Ops.size() != 1)
+  if (!MI.isCopy() || Ops.size() != 1)
     return nullptr;
 
   const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
   if (!RC)
     return nullptr;
 
-  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
+  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
   MachineBasicBlock::iterator Pos = MI;
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
@@ -777,24 +771,24 @@ void TargetInstrInfo::genAlternativeCodeSequence(
 /// foldMemoryOperand - Same as the previous version except it allows folding
 /// of any load and store from / to any address, not just from a specific
 /// stack slot.
-MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 MachineInstr *LoadMI,
+                                                 MachineInstr &LoadMI,
                                                  LiveIntervals *LIS) const {
-  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
+  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
+    assert(MI.getOperand(Ops[i]).isUse() && "Folding load into def!");
 #endif
-  MachineBasicBlock &MBB = *MI->getParent();
+  MachineBasicBlock &MBB = *MI.getParent();
   MachineFunction &MF = *MBB.getParent();
 
   // Ask the target to do the actual folding.
   MachineInstr *NewMI = nullptr;
   int FrameIndex = 0;
 
-  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
-       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
+  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
+       MI.getOpcode() == TargetOpcode::PATCHPOINT) &&
       isLoadFromStackSlot(LoadMI, FrameIndex)) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
@@ -808,39 +802,37 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   if (!NewMI) return nullptr;
 
   // Copy the memoperands from the load to the folded instruction.
-  if (MI->memoperands_empty()) {
-    NewMI->setMemRefs(LoadMI->memoperands_begin(),
-                      LoadMI->memoperands_end());
+  if (MI.memoperands_empty()) {
+    NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
   }
   else {
     // Handle the rare case of folding multiple loads.
-    NewMI->setMemRefs(MI->memoperands_begin(),
-                      MI->memoperands_end());
-    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
-           E = LoadMI->memoperands_end(); I != E; ++I) {
+    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
+                                    E = LoadMI.memoperands_end();
+         I != E; ++I) {
       NewMI->addMemOperand(MF, *I);
     }
   }
   return NewMI;
 }
 
-bool TargetInstrInfo::
-isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
-                                         AliasAnalysis *AA) const {
-  const MachineFunction &MF = *MI->getParent()->getParent();
+bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
+    const MachineInstr &MI, AliasAnalysis *AA) const {
+  const MachineFunction &MF = *MI.getParent()->getParent();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
 
   // Remat clients assume operand 0 is the defined register.
-  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
+  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
     return false;
-  unsigned DefReg = MI->getOperand(0).getReg();
+  unsigned DefReg = MI.getOperand(0).getReg();
 
   // A sub-register definition can only be rematerialized if the instruction
   // doesn't read the other parts of the register.  Otherwise it is really a
   // read-modify-write operation on the full virtual register which cannot be
   // moved safely.
   if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
-      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
+      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
     return false;
 
   // A load from a fixed stack slot can be rematerialized. This may be
@@ -852,23 +844,22 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
     return true;
 
   // Avoid instructions obviously unsafe for remat.
-  if (MI->isNotDuplicable() || MI->mayStore() ||
-      MI->hasUnmodeledSideEffects())
+  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
     return false;
 
   // Don't remat inline asm. We have no idea how expensive it is
   // even if it's side effect free.
-  if (MI->isInlineAsm())
+  if (MI.isInlineAsm())
     return false;
 
   // Avoid instructions which load from potentially varying memory.
-  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
+  if (MI.mayLoad() && !MI.isInvariantLoad(AA))
     return false;
 
   // If any of the registers accessed are non-constant, conservatively assume
   // the instruction is not rematerializable.
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
+  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI.getOperand(i);
     if (!MO.isReg()) continue;
     unsigned Reg = MO.getReg();
     if (Reg == 0)
@@ -905,8 +896,8 @@ bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
   return true;
 }
 
-int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
-  const MachineFunction *MF = MI->getParent()->getParent();
+int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
+  const MachineFunction *MF = MI.getParent()->getParent();
   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
   bool StackGrowsDown =
     TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
@@ -914,15 +905,15 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
   unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
   unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
 
-  if (MI->getOpcode() != FrameSetupOpcode &&
-      MI->getOpcode() != FrameDestroyOpcode)
+  if (MI.getOpcode() != FrameSetupOpcode &&
+      MI.getOpcode() != FrameDestroyOpcode)
     return 0;
-
-  int SPAdj = MI->getOperand(0).getImm();
+
+  int SPAdj = MI.getOperand(0).getImm();
   SPAdj = TFI->alignSPAdjust(SPAdj);
 
-  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
-      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
+  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
+      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
     SPAdj = -SPAdj;
 
   return SPAdj;
@@ -931,11 +922,11 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
 /// isSchedulingBoundary - Test if the given instruction should be
 /// considered a scheduling boundary. This primarily includes labels
 /// and terminators.
-bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
+bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
   // Terminators and labels can't be scheduled around.
-  if (MI->isTerminator() || MI->isPosition())
+  if (MI.isTerminator() || MI.isPosition())
     return true;
 
   // Don't attempt to schedule around any instruction that defines
@@ -945,7 +936,7 @@ bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
   // modification.
   const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
-  return MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
+  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
 }
 
 // Provide a global flag for disabling the PreRA hazard recognizer that targets
@@ -1014,13 +1005,12 @@ int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
 // MachineInstr latency interface.
 //===----------------------------------------------------------------------===//
 
-unsigned
-TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
-                                const MachineInstr *MI) const {
+unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
+                                         const MachineInstr &MI) const {
   if (!ItinData || ItinData->isEmpty())
     return 1;
 
-  unsigned Class = MI->getDesc().getSchedClass();
+  unsigned Class = MI.getDesc().getSchedClass();
   int UOps = ItinData->Itineraries[Class].NumMicroOps;
   if (UOps >= 0)
     return UOps;
@@ -1032,12 +1022,12 @@ unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
 /// Return the default expected latency for a def based on it's opcode.
 unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
-                                            const MachineInstr *DefMI) const {
-  if (DefMI->isTransient())
+                                            const MachineInstr &DefMI) const {
+  if (DefMI.isTransient())
     return 0;
-  if (DefMI->mayLoad())
+  if (DefMI.mayLoad())
     return SchedModel.LoadLatency;
-  if (isHighLatencyDef(DefMI->getOpcode()))
+  if (isHighLatencyDef(DefMI.getOpcode()))
     return SchedModel.HighLatency;
   return 1;
 }
@@ -1046,46 +1036,45 @@ unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
   return 0;
 }
 
-unsigned TargetInstrInfo::
-getInstrLatency(const InstrItineraryData *ItinData,
-                const MachineInstr *MI,
-                unsigned *PredCost) const {
+unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+                                          const MachineInstr &MI,
+                                          unsigned *PredCost) const {
   // Default to one cycle for no itinerary. However, an "empty" itinerary may
   // still have a MinLatency property, which getStageLatency checks.
   if (!ItinData)
-    return MI->mayLoad() ? 2 : 1;
+    return MI.mayLoad() ? 2 : 1;
 
-  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
+  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
 }
 
 bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
-                                       const MachineInstr *DefMI,
+                                       const MachineInstr &DefMI,
                                        unsigned DefIdx) const {
   const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
   if (!ItinData || ItinData->isEmpty())
     return false;
 
-  unsigned DefClass = DefMI->getDesc().getSchedClass();
+  unsigned DefClass = DefMI.getDesc().getSchedClass();
   int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
   return (DefCycle != -1 && DefCycle <= 1);
 }
 
 /// Both DefMI and UseMI must be valid.  By default, call directly to the
 /// itinerary. This may be overriden by the target.
-int TargetInstrInfo::
-getOperandLatency(const InstrItineraryData *ItinData,
-                  const MachineInstr *DefMI, unsigned DefIdx,
-                  const MachineInstr *UseMI, unsigned UseIdx) const {
-  unsigned DefClass = DefMI->getDesc().getSchedClass();
-  unsigned UseClass = UseMI->getDesc().getSchedClass();
+int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+                                       const MachineInstr &DefMI,
+                                       unsigned DefIdx,
+                                       const MachineInstr &UseMI,
+                                       unsigned UseIdx) const {
+  unsigned DefClass = DefMI.getDesc().getSchedClass();
+  unsigned UseClass = UseMI.getDesc().getSchedClass();
   return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
 }
 
 /// If we can determine the operand latency from the def only, without itinerary
 /// lookup, do so. Otherwise return -1.
 int TargetInstrInfo::computeDefOperandLatency(
-    const InstrItineraryData *ItinData,
-    const MachineInstr *DefMI) const {
+    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
 
   // Let the target hook getInstrLatency handle missing itineraries.
   if (!ItinData)
@@ -1098,21 +1087,9 @@ int TargetInstrInfo::computeDefOperandLatency(
   return -1;
 }
 
-/// computeOperandLatency - Compute and return the latency of the given data
-/// dependent def and use when the operand indices are already known. UseMI may
-/// be NULL for an unknown use.
-///
-/// FindMin may be set to get the minimum vs. expected latency. Minimum
-/// latency is used for scheduling groups, while expected latency is for
-/// instruction cost and critical path.
-///
-/// Depending on the subtarget's itinerary properties, this may or may not need
-/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
-/// UseIdx to compute min latency.
-unsigned TargetInstrInfo::
-computeOperandLatency(const InstrItineraryData *ItinData,
-                      const MachineInstr *DefMI, unsigned DefIdx,
-                      const MachineInstr *UseMI, unsigned UseIdx) const {
+unsigned TargetInstrInfo::computeOperandLatency(
+    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
+    unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {
 
   int DefLatency = computeDefOperandLatency(ItinData, DefMI);
   if (DefLatency >= 0)
@@ -1122,9 +1099,9 @@ unsigned TargetInstrInfo::computeOperandLatency(
 
   int OperLatency = 0;
   if (UseMI)
-    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
+    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
   else {
-    unsigned DefClass = DefMI->getDesc().getSchedClass();
+    unsigned DefClass = DefMI.getDesc().getSchedClass();
     OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
   }
   if (OperLatency >= 0)
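Note: with a reference parameter the old `MI = MF.CloneMachineInstr(MI);` trick is no longer expressible (a reference cannot be reseated), which is why commuteInstructionImpl now writes through a separate CommutedMI pointer. The shape, reduced to its skeleton:

    MachineInstr *CommutedMI = nullptr;
    if (NewMI)
      CommutedMI = MF.CloneMachineInstr(&MI); // NewMI: leave MI untouched
    else
      CommutedMI = &MI;                       // commute in place
    // ... swap the operand pair on CommutedMI, not on MI ...
    return CommutedMI;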
diff --git a/llvm/lib/CodeGen/TargetSchedule.cpp b/llvm/lib/CodeGen/TargetSchedule.cpp
index 19300070dfb..022e912aa84 100644
--- a/llvm/lib/CodeGen/TargetSchedule.cpp
+++ b/llvm/lib/CodeGen/TargetSchedule.cpp
@@ -77,7 +77,7 @@ unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
                                           const MCSchedClassDesc *SC) const {
   if (hasInstrItineraries()) {
     int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
-    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI);
+    return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
   }
   if (hasInstrSchedModel()) {
     if (!SC)
@@ -156,13 +156,13 @@ unsigned TargetSchedModel::computeOperandLatency(
   const MachineInstr *UseMI, unsigned UseOperIdx) const {
 
   if (!hasInstrSchedModel() && !hasInstrItineraries())
-    return TII->defaultDefLatency(SchedModel, DefMI);
+    return TII->defaultDefLatency(SchedModel, *DefMI);
 
   if (hasInstrItineraries()) {
     int OperLatency = 0;
     if (UseMI) {
-      OperLatency = TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx,
-                                           UseMI, UseOperIdx);
+      OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
+                                           *UseMI, UseOperIdx);
     }
     else {
       unsigned DefClass = DefMI->getDesc().getSchedClass();
@@ -172,15 +172,15 @@ unsigned TargetSchedModel::computeOperandLatency(
       return OperLatency;
 
     // No operand latency was found.
-    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);
+    unsigned InstrLatency = TII->getInstrLatency(&InstrItins, *DefMI);
 
     // Expected latency is the max of the stage latency and itinerary props.
    // Rather than directly querying InstrItins stage latency, we call a TII
    // hook to allow subtargets to specialize latency. This hook is only
    // applicable to the InstrItins model. InstrSchedModel should model all
    // special cases without TII hooks.
-    InstrLatency = std::max(InstrLatency,
-                            TII->defaultDefLatency(SchedModel, DefMI));
+    InstrLatency =
+        std::max(InstrLatency, TII->defaultDefLatency(SchedModel, *DefMI));
     return InstrLatency;
   }
   // hasInstrSchedModel()
@@ -219,7 +219,7 @@ unsigned TargetSchedModel::computeOperandLatency(
   // FIXME: Automatically giving all implicit defs defaultDefLatency is
   // undesirable. We should only do it for defs that are known to the MC
   // desc like flags. Truly implicit defs should get 1 cycle latency.
-  return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, DefMI);
+  return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, *DefMI);
 }
 
 unsigned
@@ -254,14 +254,14 @@ TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
   // Allow subtargets to compute Bundle latencies outside the machine model.
   if (hasInstrItineraries() || MI->isBundle() ||
       (!hasInstrSchedModel() && !UseDefaultDefLatency))
-    return TII->getInstrLatency(&InstrItins, MI);
+    return TII->getInstrLatency(&InstrItins, *MI);
 
   if (hasInstrSchedModel()) {
     const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
     if (SCDesc->isValid())
       return computeInstrLatency(*SCDesc);
   }
-  return TII->defaultDefLatency(SchedModel, MI);
+  return TII->defaultDefLatency(SchedModel, *MI);
 }
 
 unsigned TargetSchedModel::
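Note: TargetSchedModel keeps pointer-typed parameters in its own interface, so each forwarding call now dereferences at the TII boundary. A condensed sketch of the itinerary branch of computeOperandLatency above:

    int OperLatency =
        UseMI ? TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
                                       *UseMI, UseOperIdx)
              : InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(),
                                           DefOperIdx);
    if (OperLatency < 0) // no per-operand entry: fall back to instr latency
      OperLatency = TII->getInstrLatency(&InstrItins, *DefMI);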
diff --git a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
index 1a20824f23e..649a6b906fc 100644
--- a/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
+++ b/llvm/lib/CodeGen/TwoAddressInstructionPass.cpp
@@ -647,7 +647,7 @@ bool TwoAddressInstructionPass::commuteInstruction(MachineInstr *MI,
                                                    unsigned Dist) {
   unsigned RegC = MI->getOperand(RegCIdx).getReg();
   DEBUG(dbgs() << "2addr: COMMUTING  : " << *MI);
-  MachineInstr *NewMI = TII->commuteInstruction(MI, false, RegBIdx, RegCIdx);
+  MachineInstr *NewMI = TII->commuteInstruction(*MI, false, RegBIdx, RegCIdx);
 
   if (NewMI == nullptr) {
     DEBUG(dbgs() << "2addr: COMMUTING FAILED!\n");
@@ -695,7 +695,7 @@ TwoAddressInstructionPass::convertInstTo3Addr(MachineBasicBlock::iterator &mi,
                                               unsigned Dist) {
   // FIXME: Why does convertToThreeAddress() need an iterator reference?
   MachineFunction::iterator MFI = MBB->getIterator();
-  MachineInstr *NewMI = TII->convertToThreeAddress(MFI, mi, LV);
+  MachineInstr *NewMI = TII->convertToThreeAddress(MFI, *mi, LV);
   assert(MBB->getIterator() == MFI &&
          "convertToThreeAddress changed iterator reference");
   if (!NewMI)
@@ -861,7 +861,7 @@ rescheduleMIBelowKill(MachineBasicBlock::iterator &mi,
   if (!MI->isSafeToMove(AA, SeenStore))
     return false;
 
-  if (TII->getInstrLatency(InstrItins, MI) > 1)
+  if (TII->getInstrLatency(InstrItins, *MI) > 1)
     // FIXME: Needs more sophisticated heuristics.
     return false;
 
@@ -993,7 +993,7 @@ bool TwoAddressInstructionPass::isDefTooClose(unsigned Reg, unsigned Dist,
       return true;  // Below MI
     unsigned DefDist = DDI->second;
     assert(Dist > DefDist && "Visited def already?");
-    if (TII->getInstrLatency(InstrItins, &DefMI) > (Dist - DefDist))
+    if (TII->getInstrLatency(InstrItins, DefMI) > (Dist - DefDist))
      return true;
   }
   return false;
@@ -1174,7 +1174,7 @@ bool TwoAddressInstructionPass::tryInstructionCommute(MachineInstr *MI,
     // other commutable operands and does not change the values of passed
     // variables.
     if (OtherOpIdx == BaseOpIdx ||
-        !TII->findCommutedOpIndices(MI, BaseOpIdx, OtherOpIdx))
+        !TII->findCommutedOpIndices(*MI, BaseOpIdx, OtherOpIdx))
       continue;
 
     unsigned OtherOpReg = MI->getOperand(OtherOpIdx).getReg();
@@ -1307,9 +1307,9 @@ tryInstructionTransform(MachineBasicBlock::iterator &mi,
           TII->getRegClass(UnfoldMCID, LoadRegIndex, TRI, *MF));
       unsigned Reg = MRI->createVirtualRegister(RC);
       SmallVector<MachineInstr *, 2> NewMIs;
-      if (!TII->unfoldMemoryOperand(*MF, &MI, Reg,
-                                    /*UnfoldLoad=*/true,/*UnfoldStore=*/false,
-                                    NewMIs)) {
+      if (!TII->unfoldMemoryOperand(*MF, MI, Reg,
+                                    /*UnfoldLoad=*/true,
+                                    /*UnfoldStore=*/false, NewMIs)) {
         DEBUG(dbgs() << "2addr: ABANDONING UNFOLD\n");
         return false;
       }
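Note: unfoldMemoryOperand follows the same convention as the rest of the patch: the folded instruction goes in by reference, and the replacement instructions come back in NewMIs, still as pointers. A usage sketch (InsertPos is a hypothetical insertion point):

    SmallVector<MachineInstr *, 2> NewMIs;
    if (TII->unfoldMemoryOperand(*MF, MI, Reg,
                                 /*UnfoldLoad=*/true,
                                 /*UnfoldStore=*/false, NewMIs))
      for (MachineInstr *NewMI : NewMIs)
        MBB->insert(InsertPos, NewMI);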