author     Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net>    2018-03-08 13:05:02 +0000
committer  Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net>    2018-03-08 13:05:02 +0000
commit     3a6b09201781ad803afd2d5e0de135eb31dee9c5 (patch)
tree       4f28fce1ae91026b979236fb1dba7c2dcc3edabf /llvm/tools/llvm-mca/Backend.cpp
parent     a71f626eac2bf3ffb1d45a68273a83e630957ea9 (diff)
[llvm-mca] LLVM Machine Code Analyzer.
llvm-mca is an LLVM-based performance analysis tool that can be used to
statically measure the performance of code and to help triage potential
problems with target scheduling models.
llvm-mca uses information that is already available in LLVM (e.g., scheduling
models) to statically measure the performance of machine code on a specific CPU.
Performance is measured in terms of throughput as well as processor resource
consumption. The tool currently works for processors with an out-of-order
backend, for which there is a scheduling model available in LLVM.
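As a concrete illustration of the scheduling-model inputs mentioned above, here is a small, hypothetical helper (not part of this patch) that dumps the per-processor parameters exposed by LLVM's MCSchedModel; the field and accessor names come from llvm/MC/MCSchedule.h, and the helper itself is only a sketch.

```cpp
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper (not part of this patch): prints the per-processor
// scheduling-model parameters that a tool like llvm-mca builds on.
static void dumpSchedModel(const llvm::MCSchedModel &SM) {
  llvm::outs() << "IssueWidth:        " << SM.IssueWidth << '\n';
  llvm::outs() << "MicroOpBufferSize: " << SM.MicroOpBufferSize << '\n';
  // Entry 0 of the processor resource table is a reserved invalid entry.
  for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
    const llvm::MCProcResourceDesc &PR = *SM.getProcResource(I);
    llvm::outs() << "Resource " << PR.Name << ": units=" << PR.NumUnits
                 << ", buffer=" << PR.BufferSize << '\n';
  }
}
```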
The main goal of this tool is not just to predict the performance of the code
when run on the target, but also to help diagnose potential performance
issues.
Given an assembly code sequence, llvm-mca estimates the IPC (instructions per
cycle) as well as hardware resource pressure. The analysis and reporting style
were mostly inspired by Intel's IACA tool.
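For reference, the headline IPC figure reduces to a simple ratio. The sketch below is not from this patch; the parameter names are placeholders for the totals a simulated run would produce.

```cpp
// Sketch only: IPC over a simulated run, assuming NumInstructions is the size
// of the input block, Iterations is the number of simulated repetitions, and
// TotalCycles is the number of simulated cycles.
static double estimateIPC(unsigned NumInstructions, unsigned Iterations,
                          unsigned TotalCycles) {
  return static_cast<double>(NumInstructions) * Iterations / TotalCycles;
}
```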
This patch is related to the RFC on llvm-dev visible at this link:
http://lists.llvm.org/pipermail/llvm-dev/2018-March/121490.html
Differential Revision: https://reviews.llvm.org/D43951
llvm-svn: 326998
Diffstat (limited to 'llvm/tools/llvm-mca/Backend.cpp')
-rw-r--r-- | llvm/tools/llvm-mca/Backend.cpp | 132 |
1 file changed, 132 insertions, 0 deletions
diff --git a/llvm/tools/llvm-mca/Backend.cpp b/llvm/tools/llvm-mca/Backend.cpp
new file mode 100644
index 00000000000..890cbc14d86
--- /dev/null
+++ b/llvm/tools/llvm-mca/Backend.cpp
@@ -0,0 +1,132 @@
+//===--------------------- Backend.cpp --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// Implementation of class Backend which emulates a hardware OoO backend.
+///
+//===----------------------------------------------------------------------===//
+
+#include "Backend.h"
+#include "HWEventListener.h"
+#include "llvm/CodeGen/TargetSchedule.h"
+#include "llvm/Support/Debug.h"
+
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+
+using namespace llvm;
+
+void Backend::addEventListener(HWEventListener *Listener) {
+  if (Listener)
+    Listeners.insert(Listener);
+}
+
+void Backend::runCycle(unsigned Cycle) {
+  notifyCycleBegin(Cycle);
+
+  if (!SM->hasNext()) {
+    notifyCycleEnd(Cycle);
+    return;
+  }
+
+  InstRef IR = SM->peekNext();
+  const InstrDesc *Desc = &IB->getOrCreateInstrDesc(STI, *IR.second);
+  while (DU->isAvailable(Desc->NumMicroOps) && DU->canDispatch(*Desc)) {
+    Instruction *NewIS = IB->createInstruction(STI, *DU, IR.first, *IR.second);
+    Instructions[IR.first] = std::unique_ptr<Instruction>(NewIS);
+    NewIS->setRCUTokenID(DU->dispatch(IR.first, NewIS));
+
+    // If this is a zero latency instruction, then we don't need to dispatch
+    // it. Instead, we can mark it as executed.
+    if (NewIS->isZeroLatency())
+      notifyInstructionExecuted(IR.first);
+
+    // Check if we have dispatched all the instructions.
+    SM->updateNext();
+    if (!SM->hasNext())
+      break;
+
+    // Prepare for the next round.
+    IR = SM->peekNext();
+    Desc = &IB->getOrCreateInstrDesc(STI, *IR.second);
+  }
+
+  notifyCycleEnd(Cycle);
+}
+
+void Backend::notifyCycleBegin(unsigned Cycle) {
+  DEBUG(dbgs() << "[E] Cycle begin: " << Cycle << '\n');
+  for (HWEventListener *Listener : Listeners)
+    Listener->onCycleBegin(Cycle);
+
+  DU->cycleEvent(Cycle);
+  HWS->cycleEvent(Cycle);
+}
+
+void Backend::notifyInstructionDispatched(unsigned Index) {
+  DEBUG(dbgs() << "[E] Instruction Dispatched: " << Index << '\n');
+  for (HWEventListener *Listener : Listeners)
+    Listener->onInstructionDispatched(Index);
+}
+
+void Backend::notifyInstructionReady(unsigned Index) {
+  DEBUG(dbgs() << "[E] Instruction Ready: " << Index << '\n');
+  for (HWEventListener *Listener : Listeners)
+    Listener->onInstructionReady(Index);
+}
+
+void Backend::notifyInstructionIssued(
+    unsigned Index, const ArrayRef<std::pair<ResourceRef, unsigned>> &Used) {
+  DEBUG(
+    dbgs() << "[E] Instruction Issued: " << Index << '\n';
+    for (const std::pair<ResourceRef, unsigned> &Resource : Used) {
+      dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
+             << Resource.first.second << "]\n";
+      dbgs() << "           cycles: " << Resource.second << '\n';
+    }
+  );
+
+  for (HWEventListener *Listener : Listeners)
+    Listener->onInstructionIssued(Index, Used);
+}
+
+void Backend::notifyInstructionExecuted(unsigned Index) {
+  DEBUG(dbgs() << "[E] Instruction Executed: " << Index << '\n');
+  for (HWEventListener *Listener : Listeners)
+    Listener->onInstructionExecuted(Index);
+
+  const Instruction &IS = *Instructions[Index];
+  DU->onInstructionExecuted(IS.getRCUTokenID());
+}
+
+void Backend::notifyInstructionRetired(unsigned Index) {
+  DEBUG(dbgs() << "[E] Instruction Retired: " << Index << '\n');
+  for (HWEventListener *Listener : Listeners)
+    Listener->onInstructionRetired(Index);
+
+  const Instruction &IS = *Instructions[Index];
+  DU->invalidateRegisterMappings(IS);
+  Instructions.erase(Index);
+}
+
+void Backend::notifyResourceAvailable(const ResourceRef &RR) {
+  DEBUG(dbgs() << "[E] Resource Available: [" << RR.first << '.' << RR.second
+               << "]\n");
+  for (HWEventListener *Listener : Listeners)
+    Listener->onResourceAvailable(RR);
+}
+
+void Backend::notifyCycleEnd(unsigned Cycle) {
+  DEBUG(dbgs() << "[E] Cycle end: " << Cycle << "\n\n");
+  for (HWEventListener *Listener : Listeners)
+    Listener->onCycleEnd(Cycle);
+}
+
+} // namespace mca.
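To show how the notify* hooks above are meant to be consumed, here is a minimal sketch of a listener (not part of this patch), assuming HWEventListener declares virtual no-op callbacks matching the onCycleEnd and onInstructionRetired calls made by Backend; the class and method names introduced here are hypothetical.

```cpp
#include "HWEventListener.h"
#include "llvm/Support/raw_ostream.h"

namespace mca {

// Sketch only: a listener that derives retired-instruction throughput from
// the events broadcast by Backend. It only overrides the hooks it needs.
class ThroughputCounter : public HWEventListener {
  unsigned Cycles = 0;
  unsigned Retired = 0;

public:
  void onCycleEnd(unsigned /*Cycle*/) override { ++Cycles; }
  void onInstructionRetired(unsigned /*Index*/) override { ++Retired; }

  void printSummary(llvm::raw_ostream &OS) const {
    OS << "Retired: " << Retired << ", Cycles: " << Cycles << '\n';
  }
};

} // namespace mca
```

A driver would register such a listener through Backend::addEventListener() before its simulation loop repeatedly calls Backend::runCycle(Cycle), then print the summary once the run completes.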