author     Andrew Trick <atrick@apple.com>   2013-08-23 17:48:43 +0000
committer  Andrew Trick <atrick@apple.com>   2013-08-23 17:48:43 +0000
commit     c01b00400d85342fe365995269fa8c7182f1b440 (patch)
tree       96a9047b9bd8541c575e4931786dd01b102e5190 /llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
parent     11f54e86bb975bcb000b0d287772f92fe8d23c85 (diff)
Adds cyclic critical path computation and heuristics, temporarily disabled.
Estimate the cyclic critical path within a single-block loop. If the
acyclic critical path is longer, then the loop will exhaust OOO
resources after some number of iterations. If the lag between the
acyclic critical path and the cyclic critical path is longer than the
time it takes to issue those loop iterations, then aggressively
schedule for latency.
llvm-svn: 189120
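A rough standalone sketch of the reasoning above (not LLVM code; the window model, the threshold of 10, and all names are illustrative assumptions):

#include <cstdio>

// Sketch of the commit message's reasoning, not LLVM code. Per loop
// iteration the out-of-order window must buffer the lag between the
// acyclic critical path (latency of one iteration) and the cyclic
// critical path (latency carried into the next iteration).
bool latencyLimited(unsigned AcyclicPath, unsigned CyclicPath,
                    unsigned WindowSize) {
  if (CyclicPath == 0 || CyclicPath >= AcyclicPath)
    return false; // throughput-bound: the window never fills
  unsigned LagPerIter = AcyclicPath - CyclicPath;
  // Iterations the window absorbs before the core stalls on the
  // acyclic path; the threshold of 10 is an arbitrary assumption.
  return WindowSize / LagPerIter < 10;
}

int main() {
  // E.g. a 20-cycle acyclic path, a 4-cycle cyclic path, and a
  // 48-entry window: 16 cycles of lag per iteration, only 3
  // iterations buffered, so schedule aggressively for latency.
  std::printf("%s\n", latencyLimited(20, 4, 48) ? "latency-limited" : "ok");
  return 0;
}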
Diffstat (limited to 'llvm/lib/CodeGen/ScheduleDAGInstrs.cpp')
-rw-r--r--  llvm/lib/CodeGen/ScheduleDAGInstrs.cpp | 61
1 file changed, 61 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
index 24714089da4..0b5eb0ebe89 100644
--- a/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
+++ b/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp
@@ -36,6 +36,8 @@
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetSubtargetInfo.h"
+#include <queue>
+
 using namespace llvm;
 
 static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
@@ -979,6 +981,65 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
   PendingLoads.clear();
 }
 
+/// Compute the max cyclic critical path through the DAG. For loops that span
+/// basic blocks, MachineTraceMetrics should be used for this instead.
+unsigned ScheduleDAGInstrs::computeCyclicCriticalPath() {
+  // This only applies to single block loop.
+  if (!BB->isSuccessor(BB))
+    return 0;
+
+  unsigned MaxCyclicLatency = 0;
+  // Visit each live out vreg def to find def/use pairs that cross iterations.
+  for (SUnit::const_pred_iterator
+         PI = ExitSU.Preds.begin(), PE = ExitSU.Preds.end(); PI != PE; ++PI) {
+    MachineInstr *MI = PI->getSUnit()->getInstr();
+    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+      const MachineOperand &MO = MI->getOperand(i);
+      if (!MO.isReg() || !MO.isDef())
+        break;
+      unsigned Reg = MO.getReg();
+      if (!Reg || TRI->isPhysicalRegister(Reg))
+        continue;
+
+      const LiveInterval &LI = LIS->getInterval(Reg);
+      unsigned LiveOutHeight = PI->getSUnit()->getHeight();
+      unsigned LiveOutDepth = PI->getSUnit()->getDepth() + PI->getLatency();
+      // Visit all local users of the vreg def.
+      for (VReg2UseMap::iterator
+             UI = VRegUses.find(Reg); UI != VRegUses.end(); ++UI) {
+        if (UI->SU == &ExitSU)
+          continue;
+
+        // Only consider uses of the phi.
+        LiveRangeQuery LRQ(LI, LIS->getInstructionIndex(UI->SU->getInstr()));
+        if (!LRQ.valueIn()->isPHIDef())
+          continue;
+
+        // Cheat a bit and assume that a path spanning two iterations is a
+        // cycle, which could overestimate in strange cases. This allows cyclic
+        // latency to be estimated as the minimum height or depth slack.
+        unsigned CyclicLatency = 0;
+        if (LiveOutDepth > UI->SU->getDepth())
+          CyclicLatency = LiveOutDepth - UI->SU->getDepth();
+        unsigned LiveInHeight = UI->SU->getHeight() + PI->getLatency();
+        if (LiveInHeight > LiveOutHeight) {
+          if (LiveInHeight - LiveOutHeight < CyclicLatency)
+            CyclicLatency = LiveInHeight - LiveOutHeight;
+        }
+        else
+          CyclicLatency = 0;
+        DEBUG(dbgs() << "Cyclic Path: SU(" << PI->getSUnit()->NodeNum
+              << ") -> SU(" << UI->SU->NodeNum << ") = "
+              << CyclicLatency << "\n");
+        if (CyclicLatency > MaxCyclicLatency)
+          MaxCyclicLatency = CyclicLatency;
+      }
+    }
+  }
+  DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "\n");
+  return MaxCyclicLatency;
+}
+
 void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
   SU->getInstr()->dump();
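The heart of the hunk above is the per-edge slack computation. A minimal standalone distillation (the function name, parameters, and the worked values are hypothetical; the real code pulls them from SUnit depth/height and SDep latency):

#include <algorithm>
#include <cstdio>

// For a value defined late in one iteration and used early in the next
// (through a phi), the cross-iteration latency is bounded by both the
// depth slack and the height slack of the two-iteration path.
unsigned cyclicLatency(unsigned DefDepth, unsigned DefHeight,
                       unsigned UseDepth, unsigned UseHeight,
                       unsigned EdgeLatency) {
  // Depth slack: def's depth plus edge latency, minus the use's depth.
  unsigned LiveOutDepth = DefDepth + EdgeLatency;
  unsigned DepthSlack = LiveOutDepth > UseDepth ? LiveOutDepth - UseDepth : 0;
  // Height slack: use's height plus edge latency, minus the def's height.
  unsigned LiveInHeight = UseHeight + EdgeLatency;
  unsigned HeightSlack = LiveInHeight > DefHeight ? LiveInHeight - DefHeight : 0;
  // A path spanning two iterations is treated as a cycle, so the
  // estimate is the minimum of the two slacks.
  return std::min(DepthSlack, HeightSlack);
}

int main() {
  // Hypothetical pair: def at depth 8 / height 2 feeding a phi use at
  // depth 1 / height 9 over a 3-cycle edge: min(8+3-1, 9+3-2) = 10.
  std::printf("%u\n", cyclicLatency(8, 2, 1, 9, 3));
  return 0;
}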