authorAndrea Di Biagio <Andrea_DiBiagio@sn.scee.net>2019-03-04 11:52:34 +0000
committerAndrea Di Biagio <Andrea_DiBiagio@sn.scee.net>2019-03-04 11:52:34 +0000
commitbe3281a281e36c416df469ed81a4e398132da953 (patch)
tree27b3b7c0c61326410c4a693836e43068bc0f77db /llvm/lib/MCA
parent09d8ea5282505251a3da5cebb6ec7c7e0e685db2 (diff)
[MCA] Highlight kernel bottlenecks in the summary view.
This patch adds a new flag named -bottleneck-analysis to print out information about throughput bottlenecks.

MCA knows how to identify and classify dynamic dispatch stalls. However, it doesn't know how to analyze and highlight kernel bottlenecks. The goal of this patch is to teach MCA how to correlate increases in backend pressure with backend stalls (and therefore, the loss of throughput).

From a Scheduler point of view, backend pressure is a function of the scheduler buffer usage (i.e. how the number of uOps in the scheduler buffers changes over time). Backend pressure increases (or decreases) when there is a mismatch between the number of opcodes dispatched and the number of opcodes issued in the same cycle. Since buffer resources are limited, continuous increases in backend pressure would eventually lead to dispatch stalls. So, there is a strong correlation between dispatch stalls and how backpressure changes over time.

This patch teaches MCA how to identify situations where backend pressure increases because of:
 - unavailable pipeline resources.
 - data dependencies.

Data dependencies may delay execution of instructions and therefore increase the time that uOps have to spend in the scheduler buffers. That often translates to an increase in backend pressure, which may eventually lead to a bottleneck. Contention on pipeline resources may also delay execution of instructions and lead to a temporary increase in backend pressure.

Internally, the Scheduler classifies instructions based on whether their register/memory operands are available or not. An instruction is marked as "ready to execute" only if its data dependencies are fully resolved. Every cycle, the Scheduler attempts to execute all instructions that are ready to execute. If an instruction cannot execute because of unavailable pipeline resources, then the Scheduler internally updates a BusyResourceUnits mask with the ID of each unavailable resource.

ExecuteStage is responsible for tracking changes in backend pressure. If backend pressure increases during a cycle because of contention on pipeline resources, then ExecuteStage sends a "backend pressure" event to the listeners. That event contains information about the instructions delayed by resource pressure, as well as the BusyResourceUnits mask. Note that ExecuteStage also knows how to identify situations where backpressure increased because of delays introduced by data dependencies.

The SummaryView observes "backend pressure" events and prints out a "bottleneck report". Example of a bottleneck report:

```
Cycles with backend pressure increase [ 99.89% ]
Throughput Bottlenecks:
  Resource Pressure        [ 0.00% ]
  Data Dependencies:       [ 99.89% ]
  - Register Dependencies  [ 0.00% ]
  - Memory Dependencies    [ 99.89% ]
```

A bottleneck report is printed out only if increases in backend pressure eventually caused backend stalls.

About the time complexity: it is linear in the number of instructions in the Scheduler::PendingSet. The average slowdown tends to be in the range of ~5-6%. For memory-intensive kernels, the slowdown can be significant if flag -noalias=false is specified; in the worst case scenario, I have observed a slowdown of ~30% with flag -noalias=false. We can definitely recover part of that slowdown if we optimize class LSUnit (by doing extra bookkeeping to speed up queries). For now, this new analysis is disabled by default, and it can be enabled via flag -bottleneck-analysis.
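As a rough illustration of the dispatched-versus-issued bookkeeping described above, the per-cycle check reduces to counting uOps on both sides of the scheduler buffers. The following is a minimal, self-contained sketch of that idea; the names are illustrative only, and the actual logic lives in ExecuteStage::cycleEnd() in the diff below.

```cpp
// Simplified model of the per-cycle backpressure check described above.
// Illustrative sketch only; not the actual MCA implementation.
struct PressureTracker {
  unsigned NumDispatchedOpcodes = 0; // uOps dispatched during this cycle.
  unsigned NumIssuedOpcodes = 0;     // uOps issued during this cycle.

  void onCycleBegin() { NumDispatchedOpcodes = NumIssuedOpcodes = 0; }
  void onDispatch(unsigned NumMicroOps) { NumDispatchedOpcodes += NumMicroOps; }
  void onIssue(unsigned NumMicroOps) { NumIssuedOpcodes += NumMicroOps; }

  // Backend pressure grows when dispatch outpaces issue: uOps accumulate in
  // the scheduler buffers, and a sustained imbalance eventually causes
  // dispatch stalls.
  bool pressureIncreased() const {
    return NumDispatchedOpcodes > NumIssuedOpcodes;
  }
};
```

In the patch itself, ExecuteStage only reports a pressure event when this imbalance (or a scheduler token stall) is observed, and then asks the Scheduler whether resource contention or data dependencies are to blame.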
Users of MCA as a library can enable the generation of pressure events through the constructor of ExecuteStage.

This patch partially addresses https://bugs.llvm.org/show_bug.cgi?id=37494

Differential Revision: https://reviews.llvm.org/D58728

llvm-svn: 355308
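For library users, here is a minimal sketch of what opting into the new pressure events could look like when assembling the stages by hand, mirroring the Context.cpp hunk below. The helper function and its surroundings are hypothetical; only the two-argument ExecuteStage constructor and the PipelineOptions::EnableBottleneckAnalysis field come from this patch.

```cpp
// Hypothetical helper showing how a library user would opt into the new
// pressure events; mirrors the Context::createDefaultPipeline() change below.
#include "llvm/ADT/STLExtras.h"
#include "llvm/MCA/HardwareUnits/Scheduler.h"
#include "llvm/MCA/Stages/ExecuteStage.h"
#include <memory>

namespace mca = llvm::mca;

std::unique_ptr<mca::ExecuteStage>
createExecuteStage(mca::Scheduler &HWS, bool EnableBottleneckAnalysis) {
  // The second constructor argument enables the generation of
  // HWPressureEvent notifications from ExecuteStage::cycleEnd().
  return llvm::make_unique<mca::ExecuteStage>(HWS, EnableBottleneckAnalysis);
}
```

With the default pipeline, the same effect is achieved by setting the new EnableBottleneckAnalysis field in PipelineOptions, or, from the llvm-mca driver, by passing -bottleneck-analysis.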
Diffstat (limited to 'llvm/lib/MCA')
-rw-r--r--llvm/lib/MCA/Context.cpp3
-rw-r--r--llvm/lib/MCA/HardwareUnits/Scheduler.cpp28
-rw-r--r--llvm/lib/MCA/Stages/ExecuteStage.cpp43
3 files changed, 70 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/MCA/Context.cpp b/llvm/lib/MCA/Context.cpp
index 18489ccdf1f..8de675389df 100644
--- a/llvm/lib/MCA/Context.cpp
+++ b/llvm/lib/MCA/Context.cpp
@@ -42,7 +42,8 @@ Context::createDefaultPipeline(const PipelineOptions &Opts, InstrBuilder &IB,
auto Fetch = llvm::make_unique<EntryStage>(SrcMgr);
auto Dispatch = llvm::make_unique<DispatchStage>(STI, MRI, Opts.DispatchWidth,
*RCU, *PRF);
- auto Execute = llvm::make_unique<ExecuteStage>(*HWS);
+ auto Execute =
+ llvm::make_unique<ExecuteStage>(*HWS, Opts.EnableBottleneckAnalysis);
auto Retire = llvm::make_unique<RetireStage>(*RCU, *PRF);
// Pass the ownership of all the hardware units to this Context.
diff --git a/llvm/lib/MCA/HardwareUnits/Scheduler.cpp b/llvm/lib/MCA/HardwareUnits/Scheduler.cpp
index 1a428ac10a0..5b2527b886e 100644
--- a/llvm/lib/MCA/HardwareUnits/Scheduler.cpp
+++ b/llvm/lib/MCA/HardwareUnits/Scheduler.cpp
@@ -183,9 +183,9 @@ InstRef Scheduler::select() {
InstRef &IR = ReadySet[I];
if (QueueIndex == ReadySet.size() ||
Strategy->compare(IR, ReadySet[QueueIndex])) {
- const InstrDesc &D = IR.getInstruction()->getDesc();
- uint64_t BusyResourceMask = Resources->checkAvailability(D);
- IR.getInstruction()->updateCriticalResourceMask(BusyResourceMask);
+ Instruction &IS = *IR.getInstruction();
+ uint64_t BusyResourceMask = Resources->checkAvailability(IS.getDesc());
+ IS.setCriticalResourceMask(BusyResourceMask);
BusyResourceUnits |= BusyResourceMask;
if (!BusyResourceMask)
QueueIndex = I;
@@ -227,6 +227,28 @@ void Scheduler::updateIssuedSet(SmallVectorImpl<InstRef> &Executed) {
IssuedSet.resize(IssuedSet.size() - RemovedElements);
}
+uint64_t Scheduler::analyzeResourcePressure(SmallVectorImpl<InstRef> &Insts) {
+ Insts.insert(Insts.end(), ReadySet.begin(), ReadySet.end());
+ return BusyResourceUnits;
+}
+
+void Scheduler::analyzeDataDependencies(SmallVectorImpl<InstRef> &RegDeps,
+ SmallVectorImpl<InstRef> &MemDeps) {
+ const auto EndIt = PendingSet.end() - NumDispatchedToThePendingSet;
+ for (InstRef &IR : make_range(PendingSet.begin(), EndIt)) {
+ Instruction &IS = *IR.getInstruction();
+ if (Resources->checkAvailability(IS.getDesc()))
+ continue;
+
+ if (IS.isReady() ||
+ (IS.isMemOp() && LSU.isReady(IR) != IR.getSourceIndex())) {
+ MemDeps.emplace_back(IR);
+ } else {
+ RegDeps.emplace_back(IR);
+ }
+ }
+}
+
void Scheduler::cycleEvent(SmallVectorImpl<ResourceRef> &Freed,
SmallVectorImpl<InstRef> &Executed,
SmallVectorImpl<InstRef> &Ready) {
diff --git a/llvm/lib/MCA/Stages/ExecuteStage.cpp b/llvm/lib/MCA/Stages/ExecuteStage.cpp
index 27ac8f8306b..49210e06cd4 100644
--- a/llvm/lib/MCA/Stages/ExecuteStage.cpp
+++ b/llvm/lib/MCA/Stages/ExecuteStage.cpp
@@ -54,6 +54,7 @@ Error ExecuteStage::issueInstruction(InstRef &IR) {
SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> Used;
SmallVector<InstRef, 4> Ready;
HWS.issueInstruction(IR, Used, Ready);
+ NumIssuedOpcodes += IR.getInstruction()->getDesc().NumMicroOps;
notifyReservedOrReleasedBuffers(IR, /* Reserved */ false);
@@ -89,6 +90,8 @@ Error ExecuteStage::cycleStart() {
SmallVector<InstRef, 4> Ready;
HWS.cycleEvent(Freed, Executed, Ready);
+ NumDispatchedOpcodes = 0;
+ NumIssuedOpcodes = 0;
for (const ResourceRef &RR : Freed)
notifyResourceAvailable(RR);
@@ -106,6 +109,45 @@ Error ExecuteStage::cycleStart() {
return issueReadyInstructions();
}
+Error ExecuteStage::cycleEnd() {
+ if (!EnablePressureEvents)
+ return ErrorSuccess();
+
+ // Always conservatively report any backpressure events if the dispatch logic
+ // was stalled due to unavailable scheduler resources.
+ if (!HWS.hadTokenStall() && NumDispatchedOpcodes <= NumIssuedOpcodes)
+ return ErrorSuccess();
+
+ SmallVector<InstRef, 8> Insts;
+ uint64_t Mask = HWS.analyzeResourcePressure(Insts);
+ if (Mask) {
+ LLVM_DEBUG(dbgs() << "[E] Backpressure increased because of unavailable "
+ "pipeline resources: "
+ << format_hex(Mask, 16) << '\n');
+ HWPressureEvent Ev(HWPressureEvent::RESOURCES, Insts, Mask);
+ notifyEvent(Ev);
+ return ErrorSuccess();
+ }
+
+ SmallVector<InstRef, 8> RegDeps;
+ SmallVector<InstRef, 8> MemDeps;
+ HWS.analyzeDataDependencies(RegDeps, MemDeps);
+ if (RegDeps.size()) {
+ LLVM_DEBUG(
+ dbgs() << "[E] Backpressure increased by register dependencies\n");
+ HWPressureEvent Ev(HWPressureEvent::REGISTER_DEPS, RegDeps);
+ notifyEvent(Ev);
+ }
+
+ if (MemDeps.size()) {
+ LLVM_DEBUG(dbgs() << "[E] Backpressure increased by memory dependencies\n");
+ HWPressureEvent Ev(HWPressureEvent::MEMORY_DEPS, MemDeps);
+ notifyEvent(Ev);
+ }
+
+ return ErrorSuccess();
+}
+
#ifndef NDEBUG
static void verifyInstructionEliminated(const InstRef &IR) {
const Instruction &Inst = *IR.getInstruction();
@@ -147,6 +189,7 @@ Error ExecuteStage::execute(InstRef &IR) {
// be released after MCIS is issued, and all the ResourceCycles for those
// units have been consumed.
bool IsReadyInstruction = HWS.dispatch(IR);
+ NumDispatchedOpcodes += IR.getInstruction()->getDesc().NumMicroOps;
notifyReservedOrReleasedBuffers(IR, /* Reserved */ true);
if (!IsReadyInstruction)
return ErrorSuccess();