author    Duncan P. N. Exon Smith <dexonsmith@apple.com>  2016-06-30 00:01:54 +0000
committer Duncan P. N. Exon Smith <dexonsmith@apple.com>  2016-06-30 00:01:54 +0000
commit    9cfc75c214d42eebd74f9f5f5d20d453404d5db4 (patch)
tree      0f9f9110f564b6287a4db4cdf9e6097d19085c7e /llvm/lib/CodeGen/TargetSchedule.cpp
parent    c3701e8b9252498e2ed27a99238f71cb07dd43a4 (diff)
CodeGen: Use MachineInstr& in TargetInstrInfo, NFC
This is mostly a mechanical change to make the TargetInstrInfo API take MachineInstr& (instead of MachineInstr* or MachineBasicBlock::iterator) when the argument is expected to be a valid MachineInstr. This is a general API improvement.

Although it would be possible to do this one function at a time, that would demand a quadratic amount of churn since many of these functions call each other. Instead I've done everything as a block and just updated what was necessary.

The fixes are mostly mechanical: adding and removing `*` and `&` operators. The only non-mechanical change is to split ARMBaseInstrInfo::getOperandLatencyImpl out from ARMBaseInstrInfo::getOperandLatency. Previously, the latter took a `MachineInstr*` which it updated to the instruction bundle leader; now, the latter calls the former either with the same `MachineInstr&` or the bundle leader.

As a side effect, this removes a bunch of MachineInstr* to MachineBasicBlock::iterator implicit conversions, a necessary step toward fixing PR26753.

Note: I updated WebAssembly, Lanai, and AVR (despite being off-by-default) since it turned out to be easy. I couldn't run tests for AVR since llc doesn't link with it turned on.

llvm-svn: 274189
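For readers less familiar with the API in question, the following is a minimal, self-contained C++ sketch of the migration pattern the message describes. The types and members below (MachineInstr::getBundleLeader, TargetInstrInfoSketch, the *Old/*New hooks) are simplified stand-ins invented for illustration, not the actual LLVM API.

#include <cassert>

// Hypothetical stand-in types for illustration only; the real LLVM classes
// (MachineInstr, TargetInstrInfo) carry far more state and API than shown.
struct MachineInstr {
  MachineInstr *BundleLeader = nullptr; // null when not inside a bundle

  bool isInsideBundle() const { return BundleLeader != nullptr; }
  MachineInstr &getBundleLeader() {
    assert(BundleLeader && "not inside a bundle");
    return *BundleLeader;
  }
};

struct TargetInstrInfoSketch {
  // Old style: a pointer parameter that must nevertheless always be valid,
  // and (in the real API) converts implicitly to MachineBasicBlock::iterator.
  unsigned getInstrLatencyOld(const MachineInstr *MI) const {
    assert(MI && "expected a valid MachineInstr");
    return 1; // placeholder latency
  }

  // New style: the reference states the "must be valid" precondition in the
  // signature itself, so call sites simply dereference: getInstrLatency(*MI).
  unsigned getInstrLatencyNew(const MachineInstr &) const {
    return 1; // placeholder latency
  }

  // Sketch of the ARMBaseInstrInfo split described above: the public entry
  // point forwards either the same instruction or its bundle leader to a
  // private *Impl helper, instead of rewriting a MachineInstr* in place.
  unsigned getOperandLatency(MachineInstr &DefMI) const {
    MachineInstr &Resolved =
        DefMI.isInsideBundle() ? DefMI.getBundleLeader() : DefMI;
    return getOperandLatencyImpl(Resolved);
  }

private:
  unsigned getOperandLatencyImpl(const MachineInstr &) const {
    return 1; // placeholder latency
  }
};

int main() {
  MachineInstr MI;
  TargetInstrInfoSketch TII;
  // The call-site churn in the diff below is exactly this: MI -> *MI.
  return TII.getInstrLatencyNew(MI) == TII.getInstrLatencyOld(&MI) ? 0 : 1;
}

In the real tree the corresponding dereferences appear throughout the diff below, for example TII->getInstrLatency(&InstrItins, *MI).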
Diffstat (limited to 'llvm/lib/CodeGen/TargetSchedule.cpp')
-rw-r--r--  llvm/lib/CodeGen/TargetSchedule.cpp | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/CodeGen/TargetSchedule.cpp b/llvm/lib/CodeGen/TargetSchedule.cpp
index 19300070dfb..022e912aa84 100644
--- a/llvm/lib/CodeGen/TargetSchedule.cpp
+++ b/llvm/lib/CodeGen/TargetSchedule.cpp
@@ -77,7 +77,7 @@ unsigned TargetSchedModel::getNumMicroOps(const MachineInstr *MI,
const MCSchedClassDesc *SC) const {
if (hasInstrItineraries()) {
int UOps = InstrItins.getNumMicroOps(MI->getDesc().getSchedClass());
- return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, MI);
+ return (UOps >= 0) ? UOps : TII->getNumMicroOps(&InstrItins, *MI);
}
if (hasInstrSchedModel()) {
if (!SC)
@@ -156,13 +156,13 @@ unsigned TargetSchedModel::computeOperandLatency(
const MachineInstr *UseMI, unsigned UseOperIdx) const {
if (!hasInstrSchedModel() && !hasInstrItineraries())
- return TII->defaultDefLatency(SchedModel, DefMI);
+ return TII->defaultDefLatency(SchedModel, *DefMI);
if (hasInstrItineraries()) {
int OperLatency = 0;
if (UseMI) {
- OperLatency = TII->getOperandLatency(&InstrItins, DefMI, DefOperIdx,
- UseMI, UseOperIdx);
+ OperLatency = TII->getOperandLatency(&InstrItins, *DefMI, DefOperIdx,
+ *UseMI, UseOperIdx);
}
else {
unsigned DefClass = DefMI->getDesc().getSchedClass();
@@ -172,15 +172,15 @@ unsigned TargetSchedModel::computeOperandLatency(
return OperLatency;
// No operand latency was found.
- unsigned InstrLatency = TII->getInstrLatency(&InstrItins, DefMI);
+ unsigned InstrLatency = TII->getInstrLatency(&InstrItins, *DefMI);
// Expected latency is the max of the stage latency and itinerary props.
// Rather than directly querying InstrItins stage latency, we call a TII
// hook to allow subtargets to specialize latency. This hook is only
// applicable to the InstrItins model. InstrSchedModel should model all
// special cases without TII hooks.
- InstrLatency = std::max(InstrLatency,
- TII->defaultDefLatency(SchedModel, DefMI));
+ InstrLatency =
+ std::max(InstrLatency, TII->defaultDefLatency(SchedModel, *DefMI));
return InstrLatency;
}
// hasInstrSchedModel()
@@ -219,7 +219,7 @@ unsigned TargetSchedModel::computeOperandLatency(
// FIXME: Automatically giving all implicit defs defaultDefLatency is
// undesirable. We should only do it for defs that are known to the MC
// desc like flags. Truly implicit defs should get 1 cycle latency.
- return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, DefMI);
+ return DefMI->isTransient() ? 0 : TII->defaultDefLatency(SchedModel, *DefMI);
}
unsigned
@@ -254,14 +254,14 @@ TargetSchedModel::computeInstrLatency(const MachineInstr *MI,
// Allow subtargets to compute Bundle latencies outside the machine model.
if (hasInstrItineraries() || MI->isBundle() ||
(!hasInstrSchedModel() && !UseDefaultDefLatency))
- return TII->getInstrLatency(&InstrItins, MI);
+ return TII->getInstrLatency(&InstrItins, *MI);
if (hasInstrSchedModel()) {
const MCSchedClassDesc *SCDesc = resolveSchedClass(MI);
if (SCDesc->isValid())
return computeInstrLatency(*SCDesc);
}
- return TII->defaultDefLatency(SchedModel, MI);
+ return TII->defaultDefLatency(SchedModel, *MI);
}
unsigned TargetSchedModel::