author     Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net>   2019-05-23 13:42:47 +0000
committer  Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net>   2019-05-23 13:42:47 +0000
commit     dd0d9e01eeaab849701cce1a22342b04e79df54b (patch)
tree       73805d34451952fc5f697152356d5424bf25cf9c /llvm/lib/MCA
parent     46806749ac3a334bdcabb59b7082471aaf19a28a (diff)
download   bcm5719-llvm-dd0d9e01eeaab849701cce1a22342b04e79df54b.tar.gz
           bcm5719-llvm-dd0d9e01eeaab849701cce1a22342b04e79df54b.zip
[MCA] Introduce class LSUnitBase and let LSUnit derive from it.
Class LSUnitBase provides an abstract interface for all the concrete LS units in llvm-mca.

Methods exposed by the public abstract LSUnitBase interface are:
  - Status isAvailable(const InstRef &);
  - void dispatch(const InstRef &);
  - const InstRef &isReady(const InstRef &);

LSUnitBase standardises the API, but not the data structures internally used by LS units. This allows for more flexibility.

Previously, only method `isReady()` was declared virtual by class LSUnit, and derived classes had to inherit all the internal data members of LSUnit.

No functional change intended.

llvm-svn: 361496
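For context, here is a minimal sketch of what the LSUnitBase interface described above might look like. It is an assumption pieced together from the commit message and the diff below, not the actual llvm/MCA header: the member names (LQSize, SQSize, NoAlias) and accessors (getLoadQueueSize(), getStoreQueueSize(), assumeNoAlias()) mirror the diff, while the HardwareUnit base class, the include paths, and the queue-full Status enumerators are assumptions.

// Sketch only: an assumed reconstruction of the LSUnitBase interface,
// not the header introduced by this patch.
#include "llvm/MC/MCSchedule.h"                  // MCSchedModel
#include "llvm/MCA/HardwareUnits/HardwareUnit.h" // HardwareUnit (assumed base)
#include "llvm/MCA/Instruction.h"                // InstRef

namespace llvm {
namespace mca {

class LSUnitBase : public HardwareUnit {
  unsigned LQSize; // Load queue size (0 means "unbounded").
  unsigned SQSize; // Store queue size (0 means "unbounded").
  bool NoAlias;    // If true, loads are assumed to never alias stores.

public:
  LSUnitBase(const MCSchedModel &SM, unsigned LQ, unsigned SQ,
             bool AssumeNoAlias);
  virtual ~LSUnitBase();

  enum Status {
    LSU_AVAILABLE = 0,
    LSU_LQUEUE_FULL, // assumed: no free slots in the load queue
    LSU_SQUEUE_FULL  // assumed: no free slots in the store queue
  };

  // Returns LSU_AVAILABLE if queue entries can be reserved for IR.
  virtual Status isAvailable(const InstRef &IR) const = 0;

  // Reserves load/store queue entries for IR.
  virtual void dispatch(const InstRef &IR) = 0;

  // Returns IR itself if it has no pending memory dependency; otherwise
  // returns the instruction IR critically depends on.
  virtual const InstRef &isReady(const InstRef &IR) const = 0;

  unsigned getLoadQueueSize() const { return LQSize; }
  unsigned getStoreQueueSize() const { return SQSize; }
  bool assumeNoAlias() const { return NoAlias; }
};

} // namespace mca
} // namespace llvm

The Scheduler.cpp hunks below show the intended caller pattern: isReady() now returns an InstRef, and callers detect an unresolved memory dependency by comparing that reference against IR itself.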
Diffstat (limited to 'llvm/lib/MCA')
-rw-r--r--   llvm/lib/MCA/HardwareUnits/LSUnit.cpp    | 131
-rw-r--r--   llvm/lib/MCA/HardwareUnits/Scheduler.cpp |  17
2 files changed, 76 insertions, 72 deletions
diff --git a/llvm/lib/MCA/HardwareUnits/LSUnit.cpp b/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
index 4f49fbd2bb4..c3866d6bba7 100644
--- a/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
+++ b/llvm/lib/MCA/HardwareUnits/LSUnit.cpp
@@ -21,48 +21,48 @@
namespace llvm {
namespace mca {
-LSUnit::LSUnit(const MCSchedModel &SM, unsigned LQ, unsigned SQ,
- bool AssumeNoAlias)
- : LQ_Size(LQ), SQ_Size(SQ), NoAlias(AssumeNoAlias) {
+LSUnitBase::LSUnitBase(const MCSchedModel &SM, unsigned LQ, unsigned SQ,
+ bool AssumeNoAlias)
+ : LQSize(LQ), SQSize(SQ), NoAlias(AssumeNoAlias) {
if (SM.hasExtraProcessorInfo()) {
const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
- if (!LQ_Size && EPI.LoadQueueID) {
+ if (!LQSize && EPI.LoadQueueID) {
const MCProcResourceDesc &LdQDesc = *SM.getProcResource(EPI.LoadQueueID);
- LQ_Size = LdQDesc.BufferSize;
+ LQSize = LdQDesc.BufferSize;
}
- if (!SQ_Size && EPI.StoreQueueID) {
+ if (!SQSize && EPI.StoreQueueID) {
const MCProcResourceDesc &StQDesc = *SM.getProcResource(EPI.StoreQueueID);
- SQ_Size = StQDesc.BufferSize;
+ SQSize = StQDesc.BufferSize;
}
}
}
+LSUnitBase::~LSUnitBase() {}
+
#ifndef NDEBUG
void LSUnit::dump() const {
- dbgs() << "[LSUnit] LQ_Size = " << LQ_Size << '\n';
- dbgs() << "[LSUnit] SQ_Size = " << SQ_Size << '\n';
+ dbgs() << "[LSUnit] LQ_Size = " << getLoadQueueSize() << '\n';
+ dbgs() << "[LSUnit] SQ_Size = " << getStoreQueueSize() << '\n';
dbgs() << "[LSUnit] NextLQSlotIdx = " << LoadQueue.size() << '\n';
dbgs() << "[LSUnit] NextSQSlotIdx = " << StoreQueue.size() << '\n';
}
#endif
-void LSUnit::assignLQSlot(unsigned Index) {
- assert(!isLQFull());
- assert(LoadQueue.count(Index) == 0);
+void LSUnit::assignLQSlot(const InstRef &IR) {
+ assert(!isLQFull() && "Load Queue is full!");
- LLVM_DEBUG(dbgs() << "[LSUnit] - AssignLQSlot <Idx=" << Index
+ LLVM_DEBUG(dbgs() << "[LSUnit] - AssignLQSlot <Idx=" << IR.getSourceIndex()
<< ",slot=" << LoadQueue.size() << ">\n");
- LoadQueue.insert(Index);
+ LoadQueue.insert(IR);
}
-void LSUnit::assignSQSlot(unsigned Index) {
- assert(!isSQFull());
- assert(StoreQueue.count(Index) == 0);
+void LSUnit::assignSQSlot(const InstRef &IR) {
+ assert(!isSQFull() && "Store Queue is full!");
- LLVM_DEBUG(dbgs() << "[LSUnit] - AssignSQSlot <Idx=" << Index
+ LLVM_DEBUG(dbgs() << "[LSUnit] - AssignSQSlot <Idx=" << IR.getSourceIndex()
<< ",slot=" << StoreQueue.size() << ">\n");
- StoreQueue.insert(Index);
+ StoreQueue.insert(IR);
}
void LSUnit::dispatch(const InstRef &IR) {
@@ -70,17 +70,16 @@ void LSUnit::dispatch(const InstRef &IR) {
unsigned IsMemBarrier = Desc.HasSideEffects;
assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");
- const unsigned Index = IR.getSourceIndex();
if (Desc.MayLoad) {
if (IsMemBarrier)
- LoadBarriers.insert(Index);
- assignLQSlot(Index);
+ LoadBarriers.insert(IR);
+ assignLQSlot(IR);
}
if (Desc.MayStore) {
if (IsMemBarrier)
- StoreBarriers.insert(Index);
- assignSQSlot(Index);
+ StoreBarriers.insert(IR);
+ assignSQSlot(IR);
}
}
@@ -93,65 +92,67 @@ LSUnit::Status LSUnit::isAvailable(const InstRef &IR) const {
return LSUnit::LSU_AVAILABLE;
}
-unsigned LSUnit::isReady(const InstRef &IR) const {
+const InstRef &LSUnit::isReady(const InstRef &IR) const {
const InstrDesc &Desc = IR.getInstruction()->getDesc();
const unsigned Index = IR.getSourceIndex();
bool IsALoad = Desc.MayLoad;
bool IsAStore = Desc.MayStore;
assert((IsALoad || IsAStore) && "Not a memory operation!");
- assert((!IsALoad || LoadQueue.count(Index) == 1) && "Load not in queue!");
- assert((!IsAStore || StoreQueue.count(Index) == 1) && "Store not in queue!");
if (IsALoad && !LoadBarriers.empty()) {
- unsigned LoadBarrierIndex = *LoadBarriers.begin();
+ const InstRef &LoadBarrier = *LoadBarriers.begin();
// A younger load cannot pass a older load barrier.
- if (Index > LoadBarrierIndex)
- return LoadBarrierIndex;
+ if (Index > LoadBarrier.getSourceIndex())
+ return LoadBarrier;
// A load barrier cannot pass a older load.
- if (Index == LoadBarrierIndex && Index != *LoadQueue.begin())
- return *LoadQueue.begin();
+ if (Index == LoadBarrier.getSourceIndex()) {
+ const InstRef &Load = *LoadQueue.begin();
+ if (Index != Load.getSourceIndex())
+ return Load;
+ }
}
if (IsAStore && !StoreBarriers.empty()) {
- unsigned StoreBarrierIndex = *StoreBarriers.begin();
+ const InstRef &StoreBarrier = *StoreBarriers.begin();
// A younger store cannot pass a older store barrier.
- if (Index > StoreBarrierIndex)
- return StoreBarrierIndex;
+ if (Index > StoreBarrier.getSourceIndex())
+ return StoreBarrier;
// A store barrier cannot pass a older store.
- if (Index == StoreBarrierIndex && Index != *StoreQueue.begin())
- return *StoreQueue.begin();
+ if (Index == StoreBarrier.getSourceIndex()) {
+ const InstRef &Store = *StoreQueue.begin();
+ if (Index != Store.getSourceIndex())
+ return Store;
+ }
}
// A load may not pass a previous store unless flag 'NoAlias' is set.
// A load may pass a previous load.
- if (NoAlias && IsALoad)
- return Index;
+ if (assumeNoAlias() && IsALoad)
+ return IR;
if (StoreQueue.size()) {
// A load may not pass a previous store.
// A store may not pass a previous store.
- if (Index > *StoreQueue.begin())
- return *StoreQueue.begin();
+ const InstRef &Store = *StoreQueue.begin();
+ if (Index > Store.getSourceIndex())
+ return Store;
}
// Okay, we are older than the oldest store in the queue.
- // If there are no pending loads, then we can say for sure that this
- // instruction is ready.
if (isLQEmpty())
- return Index;
+ return IR;
// Check if there are no older loads.
- if (Index <= *LoadQueue.begin())
- return Index;
+ const InstRef &Load = *LoadQueue.begin();
+ if (Index <= Load.getSourceIndex())
+ return IR;
- // There is at least one younger load.
- //
// A load may pass a previous load.
if (IsALoad)
- return Index;
+ return IR;
// A store may not pass a previous load.
- return *LoadQueue.begin();
+ return Load;
}
void LSUnit::onInstructionExecuted(const InstRef &IR) {
@@ -161,29 +162,35 @@ void LSUnit::onInstructionExecuted(const InstRef &IR) {
bool IsAStore = Desc.MayStore;
if (IsALoad) {
- if (LoadQueue.erase(Index)) {
+ if (LoadQueue.erase(IR)) {
LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
<< " has been removed from the load queue.\n");
}
- if (!LoadBarriers.empty() && Index == *LoadBarriers.begin()) {
- LLVM_DEBUG(
- dbgs() << "[LSUnit]: Instruction idx=" << Index
- << " has been removed from the set of load barriers.\n");
- LoadBarriers.erase(Index);
+ if (!LoadBarriers.empty()) {
+ const InstRef &LoadBarrier = *LoadBarriers.begin();
+ if (Index == LoadBarrier.getSourceIndex()) {
+ LLVM_DEBUG(
+ dbgs() << "[LSUnit]: Instruction idx=" << Index
+ << " has been removed from the set of load barriers.\n");
+ LoadBarriers.erase(IR);
+ }
}
}
if (IsAStore) {
- if (StoreQueue.erase(Index)) {
+ if (StoreQueue.erase(IR)) {
LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << Index
<< " has been removed from the store queue.\n");
}
- if (!StoreBarriers.empty() && Index == *StoreBarriers.begin()) {
- LLVM_DEBUG(
- dbgs() << "[LSUnit]: Instruction idx=" << Index
- << " has been removed from the set of store barriers.\n");
- StoreBarriers.erase(Index);
+ if (!StoreBarriers.empty()) {
+ const InstRef &StoreBarrier = *StoreBarriers.begin();
+ if (Index == StoreBarrier.getSourceIndex()) {
+ LLVM_DEBUG(
+ dbgs() << "[LSUnit]: Instruction idx=" << Index
+ << " has been removed from the set of store barriers.\n");
+ StoreBarriers.erase(IR);
+ }
}
}
}
diff --git a/llvm/lib/MCA/HardwareUnits/Scheduler.cpp b/llvm/lib/MCA/HardwareUnits/Scheduler.cpp
index 9eeea9d0113..bf48d928899 100644
--- a/llvm/lib/MCA/HardwareUnits/Scheduler.cpp
+++ b/llvm/lib/MCA/HardwareUnits/Scheduler.cpp
@@ -119,9 +119,9 @@ bool Scheduler::promoteToReadySet(SmallVectorImpl<InstRef> &Ready) {
// Check if there are still unsolved memory dependencies.
Instruction &IS = *IR.getInstruction();
if (IS.isMemOp()) {
- unsigned CriticalMemDep = LSU.isReady(IR);
- if (CriticalMemDep != IR.getSourceIndex()) {
- IS.setCriticalMemDep(CriticalMemDep);
+ const InstRef &CriticalMemDep = LSU.isReady(IR);
+ if (CriticalMemDep != IR) {
+ IS.setCriticalMemDep(CriticalMemDep.getSourceIndex());
++I;
continue;
}
@@ -158,7 +158,7 @@ bool Scheduler::promoteToPendingSet(SmallVectorImpl<InstRef> &Pending) {
break;
// Check if this instruction is now ready. In case, force
- // a transition in state using method 'update()'.
+ // a transition in state using method 'updateDispatched()'.
Instruction &IS = *IR.getInstruction();
if (IS.isDispatched() && !IS.updateDispatched()) {
++I;
@@ -242,12 +242,10 @@ void Scheduler::analyzeDataDependencies(SmallVectorImpl<InstRef> &RegDeps,
if (Resources->checkAvailability(IS.getDesc()))
continue;
- if (IS.isReady() ||
- (IS.isMemOp() && LSU.isReady(IR) != IR.getSourceIndex())) {
+ if (IS.isReady() || (IS.isMemOp() && LSU.isReady(IR) != IR))
MemDeps.emplace_back(IR);
- } else {
+ else
RegDeps.emplace_back(IR);
- }
}
}
@@ -304,8 +302,7 @@ bool Scheduler::dispatch(const InstRef &IR) {
// Memory operations that are not in a ready state are initially assigned to
// the WaitSet.
- if (!IS.isReady() ||
- (IS.isMemOp() && LSU.isReady(IR) != IR.getSourceIndex())) {
+ if (!IS.isReady() || (IS.isMemOp() && LSU.isReady(IR) != IR)) {
LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding #" << IR << " to the WaitSet\n");
WaitSet.push_back(IR);
return false;