Diffstat (limited to 'llvm/lib/CodeGen/TargetInstrInfo.cpp')
-rw-r--r-- | llvm/lib/CodeGen/TargetInstrInfo.cpp | 313
1 file changed, 145 insertions, 168 deletions
diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp
index 6d90f9dd819..4500e890beb 100644
--- a/llvm/lib/CodeGen/TargetInstrInfo.cpp
+++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp
@@ -119,13 +119,12 @@ TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
   MBB->addSuccessor(NewDest);
 }
 
-MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
-                                                      bool NewMI,
-                                                      unsigned Idx1,
+MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
+                                                      bool NewMI, unsigned Idx1,
                                                       unsigned Idx2) const {
-  const MCInstrDesc &MCID = MI->getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
   bool HasDef = MCID.getNumDefs();
-  if (HasDef && !MI->getOperand(0).isReg())
+  if (HasDef && !MI.getOperand(0).isReg())
     // No idea how to commute this instruction. Target should implement its own.
     return nullptr;
 
@@ -134,60 +133,62 @@ MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr *MI,
   assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
          CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
          "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
-  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
+  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
          "This only knows how to commute register operands so far");
 
-  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
-  unsigned Reg1 = MI->getOperand(Idx1).getReg();
-  unsigned Reg2 = MI->getOperand(Idx2).getReg();
-  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
-  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
-  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
-  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
-  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
-  bool Reg1IsUndef = MI->getOperand(Idx1).isUndef();
-  bool Reg2IsUndef = MI->getOperand(Idx2).isUndef();
-  bool Reg1IsInternal = MI->getOperand(Idx1).isInternalRead();
-  bool Reg2IsInternal = MI->getOperand(Idx2).isInternalRead();
+  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
+  unsigned Reg1 = MI.getOperand(Idx1).getReg();
+  unsigned Reg2 = MI.getOperand(Idx2).getReg();
+  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
+  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
+  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
+  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
+  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
+  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
+  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
+  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
+  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
   // If destination is tied to either of the commuted source register, then
   // it must be updated.
   if (HasDef && Reg0 == Reg1 &&
-      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
+      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
     Reg2IsKill = false;
     Reg0 = Reg2;
     SubReg0 = SubReg2;
   } else if (HasDef && Reg0 == Reg2 &&
-             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
+             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
     Reg1IsKill = false;
     Reg0 = Reg1;
     SubReg0 = SubReg1;
   }
 
+  MachineInstr *CommutedMI = nullptr;
   if (NewMI) {
     // Create a new instruction.
-    MachineFunction &MF = *MI->getParent()->getParent();
-    MI = MF.CloneMachineInstr(MI);
+    MachineFunction &MF = *MI.getParent()->getParent();
+    CommutedMI = MF.CloneMachineInstr(&MI);
+  } else {
+    CommutedMI = &MI;
   }
 
   if (HasDef) {
-    MI->getOperand(0).setReg(Reg0);
-    MI->getOperand(0).setSubReg(SubReg0);
+    CommutedMI->getOperand(0).setReg(Reg0);
+    CommutedMI->getOperand(0).setSubReg(SubReg0);
   }
-  MI->getOperand(Idx2).setReg(Reg1);
-  MI->getOperand(Idx1).setReg(Reg2);
-  MI->getOperand(Idx2).setSubReg(SubReg1);
-  MI->getOperand(Idx1).setSubReg(SubReg2);
-  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
-  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
-  MI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
-  MI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
-  MI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
-  MI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
-  return MI;
+  CommutedMI->getOperand(Idx2).setReg(Reg1);
+  CommutedMI->getOperand(Idx1).setReg(Reg2);
+  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
+  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
+  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
+  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
+  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
+  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
+  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
+  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
+  return CommutedMI;
 }
 
-MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
-                                                  bool NewMI,
+MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
   // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
@@ -195,7 +196,7 @@ MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
   // called below.
   if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
       !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
-    assert(MI->isCommutable() &&
+    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
     return nullptr;
   }
@@ -233,13 +234,13 @@ bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
   return true;
 }
 
-bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
+bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                             unsigned &SrcOpIdx1,
                                             unsigned &SrcOpIdx2) const {
-  assert(!MI->isBundle() &&
+  assert(!MI.isBundle() &&
          "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");
 
-  const MCInstrDesc &MCID = MI->getDesc();
+  const MCInstrDesc &MCID = MI.getDesc();
   if (!MCID.isCommutable())
     return false;
 
@@ -251,8 +252,7 @@ bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                             CommutableOpIdx1, CommutableOpIdx2))
     return false;
 
-  if (!MI->getOperand(SrcOpIdx1).isReg() ||
-      !MI->getOperand(SrcOpIdx2).isReg())
+  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
     // No idea.
     return false;
   return true;
@@ -299,13 +299,12 @@ bool TargetInstrInfo::PredicateInstruction(
   return MadeChange;
 }
 
-bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
+bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
                                            const MachineMemOperand *&MMO,
                                            int &FrameIndex) const {
-  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
-                                  oe = MI->memoperands_end();
-       o != oe;
-       ++o) {
+  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
+                                  oe = MI.memoperands_end();
+       o != oe; ++o) {
     if ((*o)->isLoad()) {
       if (const FixedStackPseudoSourceValue *Value =
               dyn_cast_or_null<FixedStackPseudoSourceValue>(
@@ -319,13 +318,12 @@ bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
   return false;
 }
 
-bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
+bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
-  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
-                                  oe = MI->memoperands_end();
-       o != oe;
-       ++o) {
+  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
+                                  oe = MI.memoperands_end();
+       o != oe; ++o) {
     if ((*o)->isStore()) {
       if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
@@ -372,40 +370,37 @@ bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
 
 void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
-                                    unsigned DestReg,
-                                    unsigned SubIdx,
-                                    const MachineInstr *Orig,
+                                    unsigned DestReg, unsigned SubIdx,
+                                    const MachineInstr &Orig,
                                     const TargetRegisterInfo &TRI) const {
-  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
+  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
   MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
   MBB.insert(I, MI);
 }
 
-bool
-TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
-                                  const MachineInstr *MI1,
-                                  const MachineRegisterInfo *MRI) const {
-  return MI0->isIdenticalTo(*MI1, MachineInstr::IgnoreVRegDefs);
+bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
+                                       const MachineInstr &MI1,
+                                       const MachineRegisterInfo *MRI) const {
+  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
 
-MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
+MachineInstr *TargetInstrInfo::duplicate(MachineInstr &Orig,
                                          MachineFunction &MF) const {
-  assert(!Orig->isNotDuplicable() &&
-         "Instruction cannot be duplicated");
-  return MF.CloneMachineInstr(Orig);
+  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
+  return MF.CloneMachineInstr(&Orig);
 }
 
 // If the COPY instruction in MI can be folded to a stack operation, return
 // the register class to use.
-static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
+static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                               unsigned FoldIdx) {
-  assert(MI->isCopy() && "MI must be a COPY instruction");
-  if (MI->getNumOperands() != 2)
+  assert(MI.isCopy() && "MI must be a COPY instruction");
+  if (MI.getNumOperands() != 2)
    return nullptr;
   assert(FoldIdx<2 && "FoldIdx refers no nonexistent operand");
 
-  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
-  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);
+  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
+  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);
 
   if (FoldOp.getSubReg() || LiveOp.getSubReg())
     return nullptr;
@@ -416,7 +411,7 @@ static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
   assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
          "Cannot fold physregs");
 
-  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
+  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
   const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);
 
   if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
@@ -433,17 +428,17 @@ void TargetInstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
   llvm_unreachable("Not a MachO target");
 }
 
-static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
+static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                     ArrayRef<unsigned> Ops, int FrameIndex,
                                     const TargetInstrInfo &TII) {
   unsigned StartIdx = 0;
-  switch (MI->getOpcode()) {
+  switch (MI.getOpcode()) {
   case TargetOpcode::STACKMAP:
     StartIdx = 2; // Skip ID, nShadowBytes.
     break;
   case TargetOpcode::PATCHPOINT: {
     // For PatchPoint, the call args are not foldable.
-    PatchPointOpers opers(MI);
+    PatchPointOpers opers(&MI);
     StartIdx = opers.getVarIdx();
     break;
   }
@@ -459,15 +454,15 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
   }
 
   MachineInstr *NewMI =
-      MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
+      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
   MachineInstrBuilder MIB(MF, NewMI);
 
   // No need to fold return, the meta data, and function arguments
   for (unsigned i = 0; i < StartIdx; ++i)
-    MIB.addOperand(MI->getOperand(i));
+    MIB.addOperand(MI.getOperand(i));
 
-  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
-    MachineOperand &MO = MI->getOperand(i);
+  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
+    MachineOperand &MO = MI.getOperand(i);
     if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
       unsigned SpillSize;
       unsigned SpillOffset;
@@ -495,25 +490,24 @@ static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr *MI,
 /// operand folded, otherwise NULL is returned. The client is responsible for
 /// removing the old instruction and adding the new one in the instruction
 /// stream.
-MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
-                                                 ArrayRef<unsigned> Ops,
-                                                 int FI,
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
+                                                 ArrayRef<unsigned> Ops, int FI,
                                                  LiveIntervals *LIS) const {
   unsigned Flags = 0;
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    if (MI->getOperand(Ops[i]).isDef())
+    if (MI.getOperand(Ops[i]).isDef())
       Flags |= MachineMemOperand::MOStore;
    else
       Flags |= MachineMemOperand::MOLoad;
 
-  MachineBasicBlock *MBB = MI->getParent();
+  MachineBasicBlock *MBB = MI.getParent();
   assert(MBB && "foldMemoryOperand needs an inserted instruction");
   MachineFunction &MF = *MBB->getParent();
 
   MachineInstr *NewMI = nullptr;
 
-  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
-      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
+  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
+      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
     if (NewMI)
@@ -524,7 +518,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   }
 
   if (NewMI) {
-    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
 
     // Add a memory operand, foldMemoryOperandImpl doesn't do that.
     assert((!(Flags & MachineMemOperand::MOStore) || NewMI->mayStore()) &&
@@ -543,14 +537,14 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   }
 
   // Straight COPY may fold as load/store.
-  if (!MI->isCopy() || Ops.size() != 1)
+  if (!MI.isCopy() || Ops.size() != 1)
     return nullptr;
 
   const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
   if (!RC)
     return nullptr;
 
-  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
+  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
   MachineBasicBlock::iterator Pos = MI;
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
 
@@ -777,24 +771,24 @@ void TargetInstrInfo::genAlternativeCodeSequence(
 /// foldMemoryOperand - Same as the previous version except it allows folding
 /// of any load and store from / to any address, not just from a specific
 /// stack slot.
-MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
+MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                  ArrayRef<unsigned> Ops,
-                                                 MachineInstr *LoadMI,
+                                                 MachineInstr &LoadMI,
                                                  LiveIntervals *LIS) const {
-  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
+  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
 #ifndef NDEBUG
   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
-    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
+    assert(MI.getOperand(Ops[i]).isUse() && "Folding load into def!");
 #endif
-  MachineBasicBlock &MBB = *MI->getParent();
+  MachineBasicBlock &MBB = *MI.getParent();
   MachineFunction &MF = *MBB.getParent();
 
   // Ask the target to do the actual folding.
   MachineInstr *NewMI = nullptr;
   int FrameIndex = 0;
 
-  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
-      MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
+  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
+       MI.getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
     // Fold stackmap/patchpoint.
     NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
@@ -808,39 +802,37 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
   if (!NewMI)
     return nullptr;
 
   // Copy the memoperands from the load to the folded instruction.
-  if (MI->memoperands_empty()) {
-    NewMI->setMemRefs(LoadMI->memoperands_begin(),
-                      LoadMI->memoperands_end());
+  if (MI.memoperands_empty()) {
+    NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
   } else {
     // Handle the rare case of folding multiple loads.
-    NewMI->setMemRefs(MI->memoperands_begin(),
-                      MI->memoperands_end());
-    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
-           E = LoadMI->memoperands_end(); I != E; ++I) {
+    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
+    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
+                                    E = LoadMI.memoperands_end();
+         I != E; ++I) {
       NewMI->addMemOperand(MF, *I);
     }
   }
 
   return NewMI;
 }
 
-bool TargetInstrInfo::
-isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
-                                         AliasAnalysis *AA) const {
-  const MachineFunction &MF = *MI->getParent()->getParent();
+bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
+    const MachineInstr &MI, AliasAnalysis *AA) const {
+  const MachineFunction &MF = *MI.getParent()->getParent();
   const MachineRegisterInfo &MRI = MF.getRegInfo();
 
   // Remat clients assume operand 0 is the defined register.
-  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
+  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
     return false;
-  unsigned DefReg = MI->getOperand(0).getReg();
+  unsigned DefReg = MI.getOperand(0).getReg();
 
   // A sub-register definition can only be rematerialized if the instruction
   // doesn't read the other parts of the register.  Otherwise it is really a
   // read-modify-write operation on the full virtual register which cannot be
   // moved safely.
   if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
-      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
+      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
     return false;
 
   // A load from a fixed stack slot can be rematerialized. This may be
@@ -852,23 +844,22 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
     return true;
 
   // Avoid instructions obviously unsafe for remat.
-  if (MI->isNotDuplicable() || MI->mayStore() ||
-      MI->hasUnmodeledSideEffects())
+  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;
 
   // Don't remat inline asm. We have no idea how expensive it is
   // even if it's side effect free.
-  if (MI->isInlineAsm())
+  if (MI.isInlineAsm())
    return false;
 
   // Avoid instructions which load from potentially varying memory.
-  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
+  if (MI.mayLoad() && !MI.isInvariantLoad(AA))
    return false;
 
   // If any of the registers accessed are non-constant, conservatively assume
   // the instruction is not rematerializable.
-  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-    const MachineOperand &MO = MI->getOperand(i);
+  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI.getOperand(i);
     if (!MO.isReg()) continue;
     unsigned Reg = MO.getReg();
     if (Reg == 0)
@@ -905,8 +896,8 @@ isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
   return true;
 }
 
-int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
-  const MachineFunction *MF = MI->getParent()->getParent();
+int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
+  const MachineFunction *MF = MI.getParent()->getParent();
   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
   bool StackGrowsDown =
     TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;
@@ -914,15 +905,15 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
   unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
   unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();
 
-  if (MI->getOpcode() != FrameSetupOpcode &&
-      MI->getOpcode() != FrameDestroyOpcode)
+  if (MI.getOpcode() != FrameSetupOpcode &&
+      MI.getOpcode() != FrameDestroyOpcode)
     return 0;
-
-  int SPAdj = MI->getOperand(0).getImm();
+
+  int SPAdj = MI.getOperand(0).getImm();
   SPAdj = TFI->alignSPAdjust(SPAdj);
 
-  if ((!StackGrowsDown && MI->getOpcode() == FrameSetupOpcode) ||
-      (StackGrowsDown && MI->getOpcode() == FrameDestroyOpcode))
+  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
+      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
     SPAdj = -SPAdj;
 
   return SPAdj;
@@ -931,11 +922,11 @@ int TargetInstrInfo::getSPAdjust(const MachineInstr *MI) const {
 /// isSchedulingBoundary - Test if the given instruction should be
 /// considered a scheduling boundary. This primarily includes labels
 /// and terminators.
-bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
+bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
   // Terminators and labels can't be scheduled around.
-  if (MI->isTerminator() || MI->isPosition())
+  if (MI.isTerminator() || MI.isPosition())
     return true;
 
   // Don't attempt to schedule around any instruction that defines
@@ -945,7 +936,7 @@ bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
   // modification.
   const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
-  return MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
+  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
 }
 
 // Provide a global flag for disabling the PreRA hazard recognizer that targets
@@ -1014,13 +1005,12 @@ int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
 // MachineInstr latency interface.
 //===----------------------------------------------------------------------===//
 
-unsigned
-TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
-                                const MachineInstr *MI) const {
+unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
+                                         const MachineInstr &MI) const {
   if (!ItinData || ItinData->isEmpty())
     return 1;
 
-  unsigned Class = MI->getDesc().getSchedClass();
+  unsigned Class = MI.getDesc().getSchedClass();
   int UOps = ItinData->Itineraries[Class].NumMicroOps;
   if (UOps >= 0)
     return UOps;
@@ -1032,12 +1022,12 @@ TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
 
 /// Return the default expected latency for a def based on it's opcode.
 unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
-                                            const MachineInstr *DefMI) const {
-  if (DefMI->isTransient())
+                                            const MachineInstr &DefMI) const {
+  if (DefMI.isTransient())
     return 0;
-  if (DefMI->mayLoad())
+  if (DefMI.mayLoad())
     return SchedModel.LoadLatency;
-  if (isHighLatencyDef(DefMI->getOpcode()))
+  if (isHighLatencyDef(DefMI.getOpcode()))
     return SchedModel.HighLatency;
   return 1;
 }
@@ -1046,46 +1036,45 @@ unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
   return 0;
 }
 
-unsigned TargetInstrInfo::
-getInstrLatency(const InstrItineraryData *ItinData,
-                const MachineInstr *MI,
-                unsigned *PredCost) const {
+unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
+                                          const MachineInstr &MI,
+                                          unsigned *PredCost) const {
   // Default to one cycle for no itinerary. However, an "empty" itinerary may
   // still have a MinLatency property, which getStageLatency checks.
   if (!ItinData)
-    return MI->mayLoad() ? 2 : 1;
+    return MI.mayLoad() ? 2 : 1;
 
-  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
+  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
 }
 
 bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
-                                       const MachineInstr *DefMI,
+                                       const MachineInstr &DefMI,
                                        unsigned DefIdx) const {
   const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
   if (!ItinData || ItinData->isEmpty())
     return false;
 
-  unsigned DefClass = DefMI->getDesc().getSchedClass();
+  unsigned DefClass = DefMI.getDesc().getSchedClass();
   int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
   return (DefCycle != -1 && DefCycle <= 1);
 }
 
 /// Both DefMI and UseMI must be valid. By default, call directly to the
 /// itinerary. This may be overriden by the target.
-int TargetInstrInfo::
-getOperandLatency(const InstrItineraryData *ItinData,
-                  const MachineInstr *DefMI, unsigned DefIdx,
-                  const MachineInstr *UseMI, unsigned UseIdx) const {
-  unsigned DefClass = DefMI->getDesc().getSchedClass();
-  unsigned UseClass = UseMI->getDesc().getSchedClass();
+int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
+                                       const MachineInstr &DefMI,
+                                       unsigned DefIdx,
+                                       const MachineInstr &UseMI,
+                                       unsigned UseIdx) const {
+  unsigned DefClass = DefMI.getDesc().getSchedClass();
+  unsigned UseClass = UseMI.getDesc().getSchedClass();
   return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
 }
 
 /// If we can determine the operand latency from the def only, without itinerary
 /// lookup, do so. Otherwise return -1.
 int TargetInstrInfo::computeDefOperandLatency(
-    const InstrItineraryData *ItinData,
-    const MachineInstr *DefMI) const {
+    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {
 
   // Let the target hook getInstrLatency handle missing itineraries.
   if (!ItinData)
@@ -1098,21 +1087,9 @@ int TargetInstrInfo::computeDefOperandLatency(
   return -1;
 }
 
-/// computeOperandLatency - Compute and return the latency of the given data
-/// dependent def and use when the operand indices are already known. UseMI may
-/// be NULL for an unknown use.
-///
-/// FindMin may be set to get the minimum vs. expected latency. Minimum
-/// latency is used for scheduling groups, while expected latency is for
-/// instruction cost and critical path.
-///
-/// Depending on the subtarget's itinerary properties, this may or may not need
-/// to call getOperandLatency(). For most subtargets, we don't need DefIdx or
-/// UseIdx to compute min latency.
-unsigned TargetInstrInfo::
-computeOperandLatency(const InstrItineraryData *ItinData,
-                      const MachineInstr *DefMI, unsigned DefIdx,
-                      const MachineInstr *UseMI, unsigned UseIdx) const {
+unsigned TargetInstrInfo::computeOperandLatency(
+    const InstrItineraryData *ItinData, const MachineInstr &DefMI,
+    unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const {
 
   int DefLatency = computeDefOperandLatency(ItinData, DefMI);
   if (DefLatency >= 0)
@@ -1122,9 +1099,9 @@ computeOperandLatency(const InstrItineraryData *ItinData,
   int OperLatency = 0;
   if (UseMI)
-    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
+    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, *UseMI, UseIdx);
   else {
-    unsigned DefClass = DefMI->getDesc().getSchedClass();
+    unsigned DefClass = DefMI.getDesc().getSchedClass();
     OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
   }
   if (OperLatency >= 0)
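
For out-of-tree code tracking this API change, the call-site update is mechanical: pass *MI where a MachineInstr * used to be handed over, and take an address only where a pointer API is still required (as CloneMachineInstr is given &MI and &Orig above). The following is a minimal standalone sketch of that migration pattern; Instr and getOpcodeOf are hypothetical stand-ins for illustration, not LLVM's real classes:

#include <iostream>

// Hypothetical stand-in for MachineInstr; not LLVM's real class.
struct Instr {
  unsigned Opcode;
  unsigned getOpcode() const { return Opcode; }
};

// After the change: a reference parameter documents that the instruction
// can never be null, and the body uses '.' instead of '->'.
unsigned getOpcodeOf(const Instr &MI) { return MI.getOpcode(); }

// Before the change the signature would have been:
//   unsigned getOpcodeOf(const Instr *MI) { return MI->getOpcode(); }

int main() {
  Instr I{42};
  Instr *MI = &I;                        // many passes still hold pointers
  std::cout << getOpcodeOf(*MI) << '\n'; // was: getOpcodeOf(MI)
  return 0;
}

Note also that because a reference cannot be reseated, commuteInstructionImpl in the diff now threads a separate CommutedMI pointer through the operand updates instead of reassigning MI as the pointer version did.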