Adding MPKC & bypass mechanism

This commit is contained in:
Thanh C. Tran
2017-05-15 16:25:26 +02:00
parent ea8213da17
commit 066569c856
2 changed files with 159 additions and 78 deletions

View File

@@ -7,23 +7,21 @@ void SMS::schedule(gp *payload)
{
Thread thread = DramExtension::getExtension(payload).getThread();
buffer[thread].emplace_back(payload);
requestBuffers[thread].emplace_back(payload);
if (inFlightMemRequestCounter.find(thread) == inFlightMemRequestCounter.end()) {
inFlightMemRequestCounter[thread] = 0;
cacheMisses[thread] = 0;
}
inFlightMemRequestCounter[thread]++;
if (readybatches.find(thread) == readybatches.end()) {
readybatches[thread] = new ReadyBatch();
}
cacheMisses[thread]++;
newRequest.notify(SC_ZERO_TIME);
}
std::pair<Command, gp*> SMS::getNextRequest(Bank bank)
{
if (bankbuffer[bank].empty())
if (bankBuffers[bank].empty())
{
debugManager.printDebugMessage(name(),
"Get next request on bank " + to_string(bank.ID()) + " : EMPTY buffer");
@@ -31,19 +29,18 @@ std::pair<Command, gp*> SMS::getNextRequest(Bank bank)
}
else
{
gp* payload = bankbuffer[bank].front();
gp* payload = bankBuffers[bank].front();
Command command = IScheduler::getNextCommand(*payload);
if (command == Command::Read || command == Command::ReadA || command == Command::Write
|| command == Command::WriteA)
{
inFlightMemRequestCounter[DramExtension::getExtension(payload).getThread()]--;
bankbuffer[bank].pop_front();
bankBuffers[bank].pop_front();
}
debugManager.printDebugMessage(name(), "Get next request on bank " + to_string(bank.ID()));
return pair<Command, tlm::tlm_generic_payload*>(command, payload);
}
}
void SMS::batchScheduler()
@@ -52,11 +49,23 @@ void SMS::batchScheduler()
std::default_random_engine generator;
std::bernoulli_distribution distribution((double) SJFprobability / 100.0);
// wait(150, SC_NS); // Test Purpose
while (true)
{
selectRR_SJF(distribution(generator), memClk);
updateMPKCs(memClk);
if (isRequestBuffersEmpty() && !existReadyBatches()) {
wait(newRequest);
} else {
batchFormation(memClk);
if (existReadyBatches()) {
if (!isSystemLightlyLoaded() && (existLowIntensityThread() || distribution(generator))) {
selectSJF(memClk);
} else {
selectRR(memClk);
}
} else {
wait(memClk);
}
}
}
}
@@ -66,25 +75,11 @@ bool SMS::selectSJF(sc_time memClk)
std::vector<Thread> threadsWithNonEmptyReadybatches;
for (auto& readybatch : readybatches)
{
if (!readybatch.second->isEmpty())
if (!readybatch.second.empty())
{
// marked as thread with non-empty ready batch
threadsWithNonEmptyReadybatches.push_back(readybatch.first);
}
else
{
// form ready batch for this thread
Thread thread = readybatch.first;
while (!buffer[thread].empty() && readybatch.second->addTransaction(buffer[thread].front()))
{
buffer[thread].pop_front();
}
// marked as thread with non-empty ready batch
if (!readybatch.second->isEmpty())
{
threadsWithNonEmptyReadybatches.push_back(readybatch.first);
}
}
}
if (!threadsWithNonEmptyReadybatches.empty())
@@ -106,12 +101,12 @@ bool SMS::selectSJF(sc_time memClk)
"[SJF] Select ready batch of thread " + to_string(minThread.ID()));
// drain to bank buffers
std::deque<gp*> &requestPtrs = readybatches[minThread]->getTransactions();
std::deque<gp*> &requestPtrs = readybatches[minThread];
for (auto payloadPtrIterator = requestPtrs.begin(); payloadPtrIterator != requestPtrs.end();
payloadPtrIterator++)
{
Bank bank = DramExtension::getExtension(*payloadPtrIterator).getBank();
bankbuffer[bank].emplace_back(*payloadPtrIterator);
bankBuffers[bank].emplace_back(*payloadPtrIterator);
debugManager.printDebugMessage(name(),
"[SJF] Drain request in the ready batch of thread " + to_string(minThread.ID())
+ " to bankbuffer " + to_string(bank.ID()));
@@ -139,7 +134,7 @@ bool SMS::selectRR(sc_time memClk)
}
// pick the next non-empty ready batch
std::map<Thread, ReadyBatch*>::iterator nextSelectedThread;
std::map<Thread, std::deque<gp*>>::iterator nextSelectedThread;
if(lastSelectedThread == readybatches.end())
{
lastSelectedThread = readybatches.begin();
@@ -152,31 +147,20 @@ bool SMS::selectRR(sc_time memClk)
if(nextSelectedThread == readybatches.end())
nextSelectedThread = readybatches.begin();
}
std::map<Thread, ReadyBatch*>::iterator savedOriginalNextSelectedThread = nextSelectedThread;
while ((*nextSelectedThread).second->isEmpty())
std::map<Thread, std::deque<gp*>>::iterator savedOriginalNextSelectedThread = nextSelectedThread;
while ((*nextSelectedThread).second.empty())
{
// form ready batch for this thread
Thread thread = (*nextSelectedThread).first;
while (!buffer[thread].empty() && (*nextSelectedThread).second->addTransaction(buffer[thread].front()))
nextSelectedThread++;
if (nextSelectedThread == readybatches.end())
{
buffer[thread].pop_front();
nextSelectedThread = readybatches.begin();
}
if ((*nextSelectedThread).second->isEmpty())
if (nextSelectedThread == savedOriginalNextSelectedThread)
{
// cannot form ready batch then move to next thread
nextSelectedThread++;
if (nextSelectedThread == readybatches.end())
{
nextSelectedThread = readybatches.begin();
}
if (nextSelectedThread == savedOriginalNextSelectedThread)
{
// the next thread is the original thread, that mean req buffer are totally empty
// non-existed ready batch to be picked up & drained
return false;
}
// the next thread is the original thread, that mean req buffer are totally empty
// non-existed ready batch to be picked up & drained
return false;
}
}
// save last selected thread
@@ -186,12 +170,12 @@ bool SMS::selectRR(sc_time memClk)
"[RR] Select ready batch of thread " + to_string((*nextSelectedThread).first.ID()));
// drain to bank buffers
std::deque<gp*> &requestPtrs = (*nextSelectedThread).second->getTransactions();
std::deque<gp*> &requestPtrs = (*nextSelectedThread).second;
for (auto payloadPtrIterator = requestPtrs.begin(); payloadPtrIterator != requestPtrs.end();
payloadPtrIterator++)
{
Bank bank = DramExtension::getExtension(*payloadPtrIterator).getBank();
bankbuffer[bank].emplace_back(*payloadPtrIterator);
bankBuffers[bank].emplace_back(*payloadPtrIterator);
debugManager.printDebugMessage(name(),
"[RR] Drained request in the ready batch of thread " + to_string((*nextSelectedThread).first.ID())
+ " to bankbuffer " + to_string(bank.ID()));
@@ -203,24 +187,106 @@ bool SMS::selectRR(sc_time memClk)
return true;
}
void SMS::selectRR_SJF(bool isSJF, sc_time memClk) {
// pick correct policy
bool isSucessfulPick;
if (isSJF)
{
// select by Shortest Job First policy
isSucessfulPick = selectSJF(memClk);
bool SMS::isSystemLightlyLoaded() {
unsigned int totalRequest = 0;
for (auto& bankBuffer : bankBuffers) {
totalRequest += bankBuffer.second.size();
}
else
{
// select by Round Robin policy
isSucessfulPick = selectRR(memClk);
return (totalRequest <= LOW_SYSTEM_LOAD);
}
// Reports whether at least one thread is currently memory-non-intensive,
// i.e. its measured MPKC (misses per kilo-cycle) is below LOW_MPKC.
// Used to decide between the SJF and RR batch-selection policies.
bool SMS::existLowIntensityThread() {
    for (const auto& threadMpkc : MPKCs) {
        const bool lowIntensity = threadMpkc.second < LOW_MPKC;
        if (lowIntensity)
            return true;
    }
    return false;
}
bool SMS::isThresholdAgeExceeded(Thread thread, sc_time memClk, std::deque<gp*>::iterator begin, std::deque<gp*>::iterator end) {
// find the oldest request in the thread's batch
sc_time oldestGenerationTime = sc_time_stamp();
for (auto reqIter = begin; reqIter != end; reqIter++) {
sc_time reqGenerationTime = GenerationExtension::getExtension(*reqIter).TimeOfGeneration();
if (reqGenerationTime < oldestGenerationTime) {
oldestGenerationTime = reqGenerationTime;
}
}
// otherwise, wait for new request
if (!isSucessfulPick)
{
wait(newRequest);
// check threshold age according to the thread's MPKC
sc_time oldestRequestAge = sc_time_stamp() - oldestGenerationTime;
if ((MPKCs[thread] <= MEDIUM_MPKC) && (oldestRequestAge > (MEDIUM_THRESHOLD_AGE * memClk))) {
return true;
} else if ((MPKCs[thread] > MEDIUM_MPKC) && (oldestRequestAge > (HIGH_THRESHOLD_AGE * memClk))) {
return true;
} else {
return false;
}
}
// Periodically refreshes each thread's MPKC (misses per kilo-cycle) estimate.
// memClk is the period of one memory clock cycle.
// NOTE(review): cacheMisses is never reset here and the divisor below is the
// absolute simulation time, so the "update" branch computes a lifetime average
// rather than a per-window rate; the reset branch is overwritten again on the
// next non-boundary call — confirm this windowing is intended.
void SMS::updateMPKCs(sc_time memClk) {
// True within one clk of every MPKC_RESET_CYCLE boundary (sc_time modulo).
if (sc_time_stamp() % (MPKC_RESET_CYCLE * memClk) <= memClk) {
// reset for every 10k clk cycles
for (auto& mpkc : MPKCs) {
mpkc.second = 0;
}
} else {
// update MPKC for every thread
// misses * (1000 cycles of time) / elapsed time = misses per 1000 cycles
for (auto& mpkc : MPKCs) {
mpkc.second = (cacheMisses[mpkc.first] * 1000.0 * memClk) / (sc_time_stamp());
}
}
}
// Reports whether the given thread's request buffer has reached its capacity
// (REQUEST_BUFFER_SIZE), which forces the pending batch to be deemed ready.
// FIX: the original compared with `==`; if the buffer ever grows past the
// limit (e.g. several requests scheduled between batchFormation calls) the
// equality test would be skipped over and never fire again. `>=` is robust.
bool SMS::isExceededReqBufferSize(Thread thread) {
    return requestBuffers[thread].size() >= REQUEST_BUFFER_SIZE;
}
// Forms per-thread ready batches from the per-thread request buffers (SMS
// batch-formation stage). A new batch for a thread is only formed after its
// previous ready batch has been fully drained (ready batch empty).
// memClk is the memory clock period, used for the threshold-age check.
void SMS::batchFormation(sc_time memClk) {
    for (auto& requestBuffer : requestBuffers) {
        const Thread thread = requestBuffer.first;
        std::deque<gp*>& pending = requestBuffer.second;
        // FIX: the original gated on `!readybatches[thread].empty()`, i.e. it
        // only added requests to an ALREADY non-empty ready batch — since
        // ready batches start empty, no batch could ever form, and appending
        // to a batch mid-drain would be wrong anyway. Form a batch only when
        // there are pending requests AND the ready batch is empty (matching
        // the pre-refactor ReadyBatch behavior).
        if (pending.empty() || !readybatches[thread].empty()) {
            continue;
        }
        if (MPKCs[thread] < LOW_MPKC || isSystemLightlyLoaded()) {
            // Bypass: low-intensity thread or lightly loaded system — form a
            // batch with only one request (threshold age is effectively ZERO).
            readybatches[thread].emplace_back(pending.front());
            pending.pop_front();
        } else {
            // Count the leading run of requests that access the same row.
            // FIX: the original kept an iterator across pop_front() calls;
            // deque::pop_front invalidates iterators, so comparing against it
            // afterwards was undefined behavior. Counting avoids that.
            std::size_t sameRowCount = 1;
            const Row firstRow = DramExtension::getRow(pending.front());
            while (sameRowCount < pending.size()
                   && DramExtension::getRow(pending[sameRowCount]) == firstRow) {
                sameRowCount++;
            }
            // Deem the batch ready when a row change was found, the request
            // buffer hit capacity, or the oldest same-row request is too old.
            if (sameRowCount < pending.size()
                || isExceededReqBufferSize(thread)
                || isThresholdAgeExceeded(thread, memClk, pending.begin(),
                                          pending.begin() + sameRowCount)) {
                for (std::size_t i = 0; i < sameRowCount; i++) {
                    readybatches[thread].emplace_back(pending.front());
                    pending.pop_front();
                }
            }
        }
    }
}
bool SMS::isRequestBuffersEmpty() {
for (auto& requestBuffer : requestBuffers) {
if (!requestBuffer.second.empty()) {
return false;
}
}
return true;
}
// Reports whether at least one thread currently has a non-empty ready batch
// waiting to be drained to the bank buffers.
// FIX: the original returned true when a batch was EMPTY, inverting the
// predicate this function's name promises; that made the batchScheduler
// believe batches existed exactly when none did.
bool SMS::existReadyBatches() {
    for (auto& readybatch : readybatches) {
        if (!readybatch.second.empty()) {
            return true;
        }
    }
    return false;
}

View File

@@ -11,7 +11,13 @@
#include "../../common/dramExtension.h"
#include "../../common/DebugManager.h"
#define MIN_TOTAL_REQ 16
#define LOW_SYSTEM_LOAD 16
#define LOW_MPKC 1
#define MEDIUM_MPKC 10
#define MEDIUM_THRESHOLD_AGE 50
#define HIGH_THRESHOLD_AGE 200
#define MPKC_RESET_CYCLE 10000
#define REQUEST_BUFFER_SIZE 10
using namespace std;
@@ -28,10 +34,6 @@ public:
virtual ~SMS()
{
for (auto& thread_readybatch : readybatches)
{
delete thread_readybatch.second;
}
}
virtual void schedule(gp *payload) override;
@@ -40,19 +42,32 @@ public:
void batchScheduler();
private:
std::map<Thread, std::deque<gp*>> buffer;
std::map<Bank, std::deque<gp*>> bankbuffer;
std::map<Thread, ReadyBatch*> readybatches;
std::map<Thread, std::deque<gp*>> requestBuffers;
std::map<Bank, std::deque<gp*>> bankBuffers;
std::map<Thread, std::deque<gp*>> readybatches;
std::map<Thread, unsigned int> inFlightMemRequestCounter;
std::map<Thread, ReadyBatch*>::iterator lastSelectedThread;
std::map<Thread, unsigned int> cacheMisses;
std::map<Thread, float> MPKCs;
unsigned int SJFprobability;
DebugManager& debugManager;
std::map<Thread, std::deque<gp*>>::iterator lastSelectedThread;
sc_event newRequest;
DebugManager& debugManager;
bool selectSJF(sc_time memClk);
bool selectRR(sc_time memClk);
void selectRR_SJF(bool isSJF, sc_time memClk);
void batchFormation(sc_time memClk);
bool existLowIntensityThread();
bool isSystemLightlyLoaded();
bool isThresholdAgeExceeded(Thread thread, sc_time memClk, std::deque<gp*>::iterator begin, std::deque<gp*>::iterator end);
bool isExceededReqBufferSize(Thread thread);
void updateMPKCs(sc_time memClk);
bool isRequestBuffersEmpty();
bool existReadyBatches();
};
#endif // SMS_H