summaryrefslogtreecommitdiffstats
path: root/llvm/lib/CodeGen/MachineScheduler.cpp
diff options
context:
space:
mode:
authorNirav Dave <niravd@google.com>2018-11-14 21:11:53 +0000
committerNirav Dave <niravd@google.com>2018-11-14 21:11:53 +0000
commit1241dcb3cf469c3de9c5ca35af0a64fbb69cc10b (patch)
tree7d6902d1305394b284b1709dff6e0b997a9888c6 /llvm/lib/CodeGen/MachineScheduler.cpp
parentb0486051d29086f48f3c0db469c1e88747a21057 (diff)
downloadbcm5719-llvm-1241dcb3cf469c3de9c5ca35af0a64fbb69cc10b.tar.gz
bcm5719-llvm-1241dcb3cf469c3de9c5ca35af0a64fbb69cc10b.zip
Bias physical register immediate assignments
The machine scheduler currently biases register copies to/from physical registers to be closer to their point of use / def to minimize their live ranges. This change extends this to also physical register assignments from immediate values. This causes a reduction in overall register pressure and a minor reduction in spills, and indirectly fixes an out-of-registers assertion (PR39391). Most test changes are from minor instruction reorderings and register name selection changes and direct consequences of that. Reviewers: MatzeB, qcolombet, myatsina, pcc Subscribers: nemanjai, jvesely, nhaehnle, eraman, hiraditya, javed.absar, arphaman, jfb, jsji, llvm-commits Differential Revision: https://reviews.llvm.org/D54218 llvm-svn: 346894
Diffstat (limited to 'llvm/lib/CodeGen/MachineScheduler.cpp')
-rw-r--r--llvm/lib/CodeGen/MachineScheduler.cpp67
1 files changed, 42 insertions, 25 deletions
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index d4b9f851599..3b163fa6340 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -2516,7 +2516,7 @@ const char *GenericSchedulerBase::getReasonStr(
switch (Reason) {
case NoCand: return "NOCAND ";
case Only1: return "ONLY1 ";
- case PhysRegCopy: return "PREG-COPY ";
+ case PhysReg: return "PHYS-REG ";
case RegExcess: return "REG-EXCESS";
case RegCritical: return "REG-CRIT ";
case Stall: return "STALL ";
@@ -2852,24 +2852,41 @@ unsigned getWeakLeft(const SUnit *SU, bool isTop) {
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this when
/// regalloc has support for parallel copies.
-int biasPhysRegCopy(const SUnit *SU, bool isTop) {
+int biasPhysReg(const SUnit *SU, bool isTop) {
const MachineInstr *MI = SU->getInstr();
- if (!MI->isCopy())
- return 0;
- unsigned ScheduledOper = isTop ? 1 : 0;
- unsigned UnscheduledOper = isTop ? 0 : 1;
- // If we have already scheduled the physreg produce/consumer, immediately
- // schedule the copy.
- if (TargetRegisterInfo::isPhysicalRegister(
- MI->getOperand(ScheduledOper).getReg()))
- return 1;
- // If the physreg is at the boundary, defer it. Otherwise schedule it
- // immediately to free the dependent. We can hoist the copy later.
- bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
- if (TargetRegisterInfo::isPhysicalRegister(
- MI->getOperand(UnscheduledOper).getReg()))
- return AtBoundary ? -1 : 1;
+ if (MI->isCopy()) {
+ unsigned ScheduledOper = isTop ? 1 : 0;
+ unsigned UnscheduledOper = isTop ? 0 : 1;
+ // If we have already scheduled the physreg produce/consumer, immediately
+ // schedule the copy.
+ if (TargetRegisterInfo::isPhysicalRegister(
+ MI->getOperand(ScheduledOper).getReg()))
+ return 1;
+ // If the physreg is at the boundary, defer it. Otherwise schedule it
+ // immediately to free the dependent. We can hoist the copy later.
+ bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
+ if (TargetRegisterInfo::isPhysicalRegister(
+ MI->getOperand(UnscheduledOper).getReg()))
+ return AtBoundary ? -1 : 1;
+ }
+
+ if (MI->isMoveImmediate()) {
+ // If we have a move immediate and all successors have been assigned, bias
+ // towards scheduling this later. Make sure all register defs are to
+ // physical registers.
+ bool DoBias = true;
+ for (const MachineOperand &Op : MI->defs()) {
+ if (Op.isReg() && !TargetRegisterInfo::isPhysicalRegister(Op.getReg())) {
+ DoBias = false;
+ break;
+ }
+ }
+
+ if (DoBias)
+ return isTop ? -1 : 1;
+ }
+
return 0;
}
} // end namespace llvm
@@ -2930,9 +2947,9 @@ void GenericScheduler::tryCandidate(SchedCandidate &Cand,
return;
}
- if (tryGreater(biasPhysRegCopy(TryCand.SU, TryCand.AtTop),
- biasPhysRegCopy(Cand.SU, Cand.AtTop),
- TryCand, Cand, PhysRegCopy))
+ // Bias PhysReg Defs and copies to their uses and defined respectively.
+ if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
+ biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
return;
// Avoid exceeding the target's limit.
@@ -3179,7 +3196,7 @@ SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
return SU;
}
-void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
+void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
MachineBasicBlock::iterator InsertPos = SU->getInstr();
if (!isTop)
++InsertPos;
@@ -3194,7 +3211,7 @@ void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
continue;
MachineInstr *Copy = DepSU->getInstr();
- if (!Copy->isCopy())
+ if (!Copy->isCopy() && !Copy->isMoveImmediate())
continue;
LLVM_DEBUG(dbgs() << " Rescheduling physreg copy ";
DAG->dumpNode(*Dep.getSUnit()));
@@ -3208,18 +3225,18 @@ void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
-/// them here. See comments in biasPhysRegCopy.
+/// them here. See comments in biasPhysReg.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
if (IsTopNode) {
SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
Top.bumpNode(SU);
if (SU->hasPhysRegUses)
- reschedulePhysRegCopies(SU, true);
+ reschedulePhysReg(SU, true);
} else {
SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
Bot.bumpNode(SU);
if (SU->hasPhysRegDefs)
- reschedulePhysRegCopies(SU, false);
+ reschedulePhysReg(SU, false);
}
}
OpenPOWER on IntegriCloud