Merge branch 'rambus_scheduler' into DDR5.

This commit is contained in:
Lukas Steiner
2020-11-03 15:13:19 +01:00
24 changed files with 446 additions and 217 deletions

View File

@@ -97,6 +97,19 @@ void DramExtension::setExtension(tlm::tlm_generic_payload &payload,
threadPayloadID, channelPayloadID);
}
void DramExtension::setPayloadIDs(tlm::tlm_generic_payload *payload, uint64_t threadPayloadID, uint64_t channelPayloadID)
{
DramExtension *extension;
payload->get_extension(extension);
extension->threadPayloadID = threadPayloadID;
extension->channelPayloadID = channelPayloadID;
}
void DramExtension::setPayloadIDs(tlm::tlm_generic_payload &payload, uint64_t threadPayloadID, uint64_t channelPayloadID)
{
DramExtension::setPayloadIDs(&payload, threadPayloadID, channelPayloadID);
}
DramExtension &DramExtension::getExtension(const tlm_generic_payload *payload)
{
DramExtension *result = nullptr;

View File

@@ -185,6 +185,11 @@ public:
static DramExtension &getExtension(const tlm::tlm_generic_payload *payload);
static DramExtension &getExtension(const tlm::tlm_generic_payload &payload);
static void setPayloadIDs(tlm::tlm_generic_payload *payload,
uint64_t threadPayloadID, uint64_t channelPayloadID);
static void setPayloadIDs(tlm::tlm_generic_payload &payload,
uint64_t threadPayloadID, uint64_t channelPayloadID);
// Used for convenience, caller could also use getExtension(..) to access these fields
static Thread getThread(const tlm::tlm_generic_payload *payload);
static Thread getThread(const tlm::tlm_generic_payload &payload);

View File

@@ -138,6 +138,15 @@ void Configuration::setParameter(std::string name, nlohmann::json value)
else
SC_REPORT_FATAL("Configuration", "Unsupported response queue!");
}
else if (name == "Arbiter")
{
if (value == "Fifo")
arbiter = Arbiter::Fifo;
else if (value == "Reorder")
arbiter = Arbiter::Reorder;
else
SC_REPORT_FATAL("Configuration", "Unsupported arbiter!");
}
else if (name == "RefreshPolicy")
{
if (value == "NoRefresh")
@@ -166,6 +175,8 @@ void Configuration::setParameter(std::string name, nlohmann::json value)
}
else if (name == "PowerDownTimeout")
powerDownTimeout = value;
else if (name == "MaxActiveTransactions")
maxActiveTransactions = value;
//SimConfig------------------------------------------------
else if (name == "SimulationName")
simulationName = value;

View File

@@ -74,12 +74,14 @@ public:
enum class SchedulerBuffer {Bankwise, ReadWrite} schedulerBuffer;
enum class CmdMux {Oldest, Strict} cmdMux;
enum class RespQueue {Fifo, Reorder} respQueue;
enum class Arbiter {Fifo, Reorder} arbiter;
unsigned int requestBufferSize = 8;
enum class RefreshPolicy {NoRefresh, Rankwise, Bankwise, Groupwise} refreshPolicy;
unsigned int refreshMaxPostponed = 0;
unsigned int refreshMaxPulledin = 0;
enum class PowerDownPolicy {NoPowerDown, Staggered} powerDownPolicy;
unsigned int powerDownTimeout = 3;
unsigned int maxActiveTransactions = 64;
// SimConfig
std::string simulationName = "default";

View File

@@ -55,17 +55,17 @@ void BankMachine::updateState(Command command)
switch (command)
{
case Command::ACT:
currentState = BmState::Activated;
currentRow = DramExtension::getRow(currentPayload);
state = State::Activated;
openRow = DramExtension::getRow(currentPayload);
break;
case Command::PRE: case Command::PREA: case Command::PRESB:
currentState = BmState::Precharged;
state = State::Precharged;
break;
case Command::RD: case Command::WR:
currentPayload = nullptr;
break;
case Command::RDA: case Command::WRA:
currentState = BmState::Precharged;
state = State::Precharged;
currentPayload = nullptr;
break;
case Command::PDEA: case Command::PDEP: case Command::SREFEN:
@@ -105,12 +105,12 @@ Bank BankMachine::getBank()
Row BankMachine::getOpenRow()
{
return currentRow;
return openRow;
}
BmState BankMachine::getState()
BankMachine::State BankMachine::getState()
{
return currentState;
return state;
}
bool BankMachine::isIdle()
@@ -133,14 +133,14 @@ sc_time BankMachineOpen::start()
if (currentPayload == nullptr)
return timeToSchedule;
if (currentState == BmState::Precharged && !blocked) // row miss
if (state == State::Precharged && !blocked) // row miss
{
nextCommand = Command::ACT;
timeToSchedule = checker->timeToSatisfyConstraints(nextCommand, rank, bankgroup, bank);
}
else if (currentState == BmState::Activated)
else if (state == State::Activated)
{
if (DramExtension::getRow(currentPayload) == currentRow) // row hit
if (DramExtension::getRow(currentPayload) == openRow) // row hit
{
if (currentPayload->get_command() == TLM_READ_COMMAND)
nextCommand = Command::RD;
@@ -175,12 +175,12 @@ sc_time BankMachineClosed::start()
if (currentPayload == nullptr)
return timeToSchedule;
if (currentState == BmState::Precharged && !blocked) // row miss
if (state == State::Precharged && !blocked) // row miss
{
nextCommand = Command::ACT;
timeToSchedule = checker->timeToSatisfyConstraints(nextCommand, rank, bankgroup, bank);
}
else if (currentState == BmState::Activated)
else if (state == State::Activated)
{
if (currentPayload->get_command() == TLM_READ_COMMAND)
nextCommand = Command::RDA;
@@ -209,16 +209,16 @@ sc_time BankMachineOpenAdaptive::start()
if (currentPayload == nullptr)
return timeToSchedule;
if (currentState == BmState::Precharged && !blocked) // row miss
if (state == State::Precharged && !blocked) // row miss
{
nextCommand = Command::ACT;
timeToSchedule = checker->timeToSatisfyConstraints(nextCommand, rank, bankgroup, bank);
}
else if (currentState == BmState::Activated)
else if (state == State::Activated)
{
if (DramExtension::getRow(currentPayload) == currentRow) // row hit
if (DramExtension::getRow(currentPayload) == openRow) // row hit
{
if (scheduler->hasFurtherRequest(bank) && !scheduler->hasFurtherRowHit(bank, currentRow))
if (scheduler->hasFurtherRequest(bank) && !scheduler->hasFurtherRowHit(bank, openRow))
{
if (currentPayload->get_command() == TLM_READ_COMMAND)
nextCommand = Command::RDA;
@@ -262,16 +262,16 @@ sc_time BankMachineClosedAdaptive::start()
if (currentPayload == nullptr)
return timeToSchedule;
if (currentState == BmState::Precharged && !blocked) // row miss
if (state == State::Precharged && !blocked) // row miss
{
nextCommand = Command::ACT;
timeToSchedule = checker->timeToSatisfyConstraints(nextCommand, rank, bankgroup, bank);
}
else if (currentState == BmState::Activated)
else if (state == State::Activated)
{
if (DramExtension::getRow(currentPayload) == currentRow) // row hit
if (DramExtension::getRow(currentPayload) == openRow) // row hit
{
if (scheduler->hasFurtherRowHit(bank, currentRow))
if (scheduler->hasFurtherRowHit(bank, openRow))
{
if (currentPayload->get_command() == TLM_READ_COMMAND)
nextCommand = Command::RD;

View File

@@ -43,12 +43,6 @@
#include "scheduler/SchedulerIF.h"
#include "checker/CheckerIF.h"
enum class BmState
{
Precharged,
Activated
};
class BankMachine
{
public:
@@ -58,11 +52,13 @@ public:
void updateState(Command);
void block();
enum class State {Precharged, Activated};
Rank getRank();
BankGroup getBankGroup();
Bank getBank();
Row getOpenRow();
BmState getState();
State getState();
bool isIdle();
protected:
@@ -71,8 +67,8 @@ protected:
SchedulerIF *scheduler;
CheckerIF *checker;
Command nextCommand = Command::NOP;
BmState currentState = BmState::Precharged;
Row currentRow;
State state = State::Precharged;
Row openRow;
sc_time timeToSchedule = sc_max_time();
Rank rank = Rank(0);
BankGroup bankgroup = BankGroup(0);

View File

@@ -337,24 +337,22 @@ void Controller::controllerMethod()
tlm_sync_enum Controller::nb_transport_fw(tlm_generic_payload &trans,
tlm_phase &phase, sc_time &delay)
{
sc_time notificationDelay = delay + Configuration::getInstance().memSpec->tCK;
if (phase == BEGIN_REQ)
{
transToAcquire.payload = &trans;
transToAcquire.time = sc_time_stamp() + notificationDelay;
beginReqEvent.notify(notificationDelay);
transToAcquire.time = sc_time_stamp() + delay;
beginReqEvent.notify(delay);
}
else if (phase == END_RESP)
{
transToRelease.time = sc_time_stamp() + notificationDelay;
endRespEvent.notify(notificationDelay);
transToRelease.time = sc_time_stamp() + delay;
endRespEvent.notify(delay);
}
else
SC_REPORT_FATAL("Controller", "nb_transport_fw in controller was triggered with unknown phase");
PRINTDEBUGMESSAGE(name(), "[fw] " + getPhaseName(phase) + " notification in " +
notificationDelay.to_string());
delay.to_string());
return TLM_ACCEPTED;
}

View File

@@ -72,13 +72,13 @@ protected:
virtual void sendToFrontend(tlm::tlm_generic_payload *, tlm::tlm_phase);
virtual void sendToDram(Command, tlm::tlm_generic_payload *);
MemSpec *memSpec;
private:
unsigned totalNumberOfPayloads = 0;
std::vector<unsigned> ranksNumberOfPayloads;
ReadyCommands readyCommands;
MemSpec *memSpec;
std::vector<BankMachine *> bankMachines;
std::vector<std::vector<BankMachine *>> bankMachinesOnRank;
CmdMuxIF *cmdMux;

View File

@@ -65,7 +65,10 @@ void ControllerRecordable::sendToDram(Command command, tlm_generic_payload *payl
TimeInterval dataStrobe = Configuration::getInstance().memSpec->getIntervalOnDataStrobe(command);
tlmRecorder->updateDataStrobe(dataStrobe.start, dataStrobe.end, *payload);
}
Controller::sendToDram(command, payload);
sc_time delay = SC_ZERO_TIME;
tlm_phase phase = commandToPhase(command);
iSocket->nb_transport_fw(*payload, phase, delay);
}
void ControllerRecordable::recordPhase(tlm_generic_payload &trans, tlm_phase phase, sc_time delay)

View File

@@ -47,7 +47,7 @@ void PowerDownManagerStaggered::triggerEntry()
{
controllerIdle = true;
if (state == PdmState::Idle)
if (state == State::Idle)
entryTriggered = true;
}
@@ -57,7 +57,7 @@ void PowerDownManagerStaggered::triggerExit()
enterSelfRefresh = false;
entryTriggered = false;
if (state != PdmState::Idle)
if (state != State::Idle)
exitTriggered = true;
}
@@ -65,7 +65,7 @@ void PowerDownManagerStaggered::triggerInterruption()
{
entryTriggered = false;
if (state != PdmState::Idle)
if (state != State::Idle)
exitTriggered = true;
}
@@ -81,13 +81,13 @@ sc_time PowerDownManagerStaggered::start()
if (exitTriggered)
{
if (state == PdmState::ActivePdn)
if (state == State::ActivePdn)
nextCommand = Command::PDXA;
else if (state == PdmState::PrechargePdn)
else if (state == State::PrechargePdn)
nextCommand = Command::PDXP;
else if (state == PdmState::SelfRefresh)
else if (state == State::SelfRefresh)
nextCommand = Command::SREFEX;
else if (state == PdmState::ExtraRefresh)
else if (state == State::ExtraRefresh)
nextCommand = Command::REFA;
timeToSchedule = checker->timeToSatisfyConstraints(nextCommand, rank, BankGroup(0), Bank(0));
@@ -124,35 +124,35 @@ void PowerDownManagerStaggered::updateState(Command command)
activatedBanks = 0;
break;
case Command::PDEA:
state = PdmState::ActivePdn;
state = State::ActivePdn;
entryTriggered = false;
break;
case Command::PDEP:
state = PdmState::PrechargePdn;
state = State::PrechargePdn;
entryTriggered = false;
break;
case Command::SREFEN:
state = PdmState::SelfRefresh;
state = State::SelfRefresh;
entryTriggered = false;
enterSelfRefresh = false;
break;
case Command::PDXA:
state = PdmState::Idle;
state = State::Idle;
exitTriggered = false;
break;
case Command::PDXP:
state = PdmState::Idle;
state = State::Idle;
exitTriggered = false;
if (controllerIdle)
enterSelfRefresh = true;
break;
case Command::SREFEX:
state = PdmState::ExtraRefresh;
state = State::ExtraRefresh;
break;
case Command::REFA:
if (state == PdmState::ExtraRefresh)
if (state == State::ExtraRefresh)
{
state = PdmState::Idle;
state = State::Idle;
exitTriggered = false;
}
else if (controllerIdle)

View File

@@ -53,7 +53,7 @@ public:
virtual sc_time start() override;
private:
enum class PdmState {Idle, ActivePdn, PrechargePdn, SelfRefresh, ExtraRefresh} state = PdmState::Idle;
enum class State {Idle, ActivePdn, PrechargePdn, SelfRefresh, ExtraRefresh} state = State::Idle;
tlm::tlm_generic_payload powerDownPayload;
Rank rank;
CheckerIF *checker;

View File

@@ -81,10 +81,10 @@ sc_time RefreshManagerBankwise::start()
if (sc_time_stamp() >= timeForNextTrigger + memSpec->getRefreshIntervalPB())
{
timeForNextTrigger += memSpec->getRefreshIntervalPB();
state = RmState::Regular;
state = State::Regular;
}
if (state == RmState::Regular)
if (state == State::Regular)
{
bool forcedRefresh = (flexibilityCounter == maxPostponed);
bool allBanksBusy = true;
@@ -114,7 +114,7 @@ sc_time RefreshManagerBankwise::start()
}
else
{
if (currentBankMachine->getState() == BmState::Activated)
if (currentBankMachine->getState() == BankMachine::State::Activated)
nextCommand = Command::PRE;
else
{
@@ -149,13 +149,13 @@ sc_time RefreshManagerBankwise::start()
if (allBanksBusy)
{
state = RmState::Regular;
state = State::Regular;
timeForNextTrigger += memSpec->getRefreshIntervalPB();
return timeForNextTrigger;
}
else
{
if (currentBankMachine->getState() == BmState::Activated)
if (currentBankMachine->getState() == BankMachine::State::Activated)
nextCommand = Command::PRE;
else
nextCommand = Command::REFB;
@@ -179,20 +179,20 @@ void RefreshManagerBankwise::updateState(Command command)
if (remainingBankMachines.empty())
remainingBankMachines = allBankMachines;
if (state == RmState::Pulledin)
if (state == State::Pulledin)
flexibilityCounter--;
else
state = RmState::Pulledin;
state = State::Pulledin;
if (flexibilityCounter == maxPulledin)
{
state = RmState::Regular;
state = State::Regular;
timeForNextTrigger += memSpec->getRefreshIntervalPB();
}
break;
case Command::REFA:
// Refresh command after SREFEX
state = RmState::Regular; // TODO: check if this assignment is necessary
state = State::Regular; // TODO: check if this assignment is necessary
timeForNextTrigger = sc_time_stamp() + memSpec->getRefreshIntervalPB();
sleeping = false;
break;

View File

@@ -53,7 +53,7 @@ public:
virtual void updateState(Command) override;
private:
enum class RmState {Regular, Pulledin} state = RmState::Regular;
enum class State {Regular, Pulledin} state = State::Regular;
const MemSpec *memSpec;
std::vector<BankMachine *> &bankMachinesOnRank;
PowerDownManagerIF *powerDownManager;

View File

@@ -70,9 +70,9 @@ RefreshManagerGroupwise::RefreshManagerGroupwise(std::vector<BankMachine *> &ban
maxPulledin = -static_cast<int>(config.refreshMaxPulledin * memSpec->banksPerGroup);
}
std::tuple<Command, tlm_generic_payload *, sc_time> RefreshManagerGroupwise::getNextCommand()
CommandTuple::Type RefreshManagerGroupwise::getNextCommand()
{
return std::tuple<Command, tlm_generic_payload *, sc_time>
return CommandTuple::Type
(nextCommand, &refreshPayloads[currentIterator->front()->getBank().ID()
% memSpec->banksPerGroup], timeToSchedule);
}
@@ -91,10 +91,10 @@ sc_time RefreshManagerGroupwise::start()
if (sc_time_stamp() >= timeForNextTrigger + memSpec->getRefreshIntervalSB())
{
timeForNextTrigger += memSpec->getRefreshIntervalSB();
state = RmState::Regular;
state = State::Regular;
}
if (state == RmState::Regular)
if (state == State::Regular)
{
bool forcedRefresh = (flexibilityCounter == maxPostponed);
bool allBanksBusy = true;
@@ -133,7 +133,7 @@ sc_time RefreshManagerGroupwise::start()
nextCommand = Command::REFSB;
for (auto it : *currentIterator)
{
if (it->getState() == BmState::Activated)
if (it->getState() == BankMachine::State::Activated)
{
nextCommand = Command::PRESB;
break;
@@ -178,7 +178,7 @@ sc_time RefreshManagerGroupwise::start()
if (allBanksBusy)
{
state = RmState::Regular;
state = State::Regular;
timeForNextTrigger += memSpec->getRefreshIntervalSB();
return timeForNextTrigger;
}
@@ -187,7 +187,7 @@ sc_time RefreshManagerGroupwise::start()
nextCommand = Command::REFSB;
for (auto it : *currentIterator)
{
if (it->getState() == BmState::Activated)
if (it->getState() == BankMachine::State::Activated)
{
nextCommand = Command::PRESB;
break;
@@ -215,20 +215,20 @@ void RefreshManagerGroupwise::updateState(Command command)
remainingBankMachines = allBankMachines;
currentIterator = remainingBankMachines.begin();
if (state == RmState::Pulledin)
if (state == State::Pulledin)
flexibilityCounter--;
else
state = RmState::Pulledin;
state = State::Pulledin;
if (flexibilityCounter == maxPulledin)
{
state = RmState::Regular;
state = State::Regular;
timeForNextTrigger += memSpec->getRefreshIntervalSB();
}
break;
case Command::REFA:
// Refresh command after SREFEX
state = RmState::Regular; // TODO: check if this assignment is necessary
state = State::Regular; // TODO: check if this assignment is necessary
timeForNextTrigger = sc_time_stamp() + memSpec->getRefreshIntervalSB();
sleeping = false;
break;

View File

@@ -48,12 +48,12 @@ class RefreshManagerGroupwise final : public RefreshManagerIF
public:
RefreshManagerGroupwise(std::vector<BankMachine *> &, PowerDownManagerIF *, Rank, CheckerIF *);
virtual std::tuple<Command, tlm::tlm_generic_payload *, sc_time> getNextCommand() override;
virtual CommandTuple::Type getNextCommand() override;
virtual sc_time start() override;
virtual void updateState(Command) override;
private:
enum class RmState {Regular, Pulledin} state = RmState::Regular;
enum class State {Regular, Pulledin} state = State::Regular;
const MemSpec *memSpec;
std::vector<BankMachine *> &bankMachinesOnRank;
PowerDownManagerIF *powerDownManager;
@@ -64,15 +64,9 @@ private:
CheckerIF *checker;
Command nextCommand = Command::NOP;
//std::list<BankMachine *> remainingBankMachines;
//std::list<BankMachine *> allBankMachines;
//std::list<BankMachine *>::iterator currentIterator;
//BankMachine *currentBankMachines;
std::list<std::vector<BankMachine *>> remainingBankMachines;
std::list<std::vector<BankMachine *>> allBankMachines;
std::list<std::vector<BankMachine *>>::iterator currentIterator;
//std::vector<BankMachine *> *currentBankMachines;
int flexibilityCounter = 0;
int maxPostponed = 0;

View File

@@ -71,10 +71,10 @@ sc_time RefreshManagerRankwise::start()
if (sc_time_stamp() >= timeForNextTrigger + memSpec->getRefreshIntervalAB())
{
timeForNextTrigger += memSpec->getRefreshIntervalAB();
state = RmState::Regular;
state = State::Regular;
}
if (state == RmState::Regular)
if (state == State::Regular)
{
if (flexibilityCounter == maxPostponed) // forced refresh
{
@@ -122,7 +122,7 @@ sc_time RefreshManagerRankwise::start()
if (controllerBusy)
{
state = RmState::Regular;
state = State::Regular;
timeForNextTrigger += memSpec->getRefreshIntervalAB();
return timeForNextTrigger;
}
@@ -155,20 +155,20 @@ void RefreshManagerRankwise::updateState(Command command)
if (sleeping)
{
// Refresh command after SREFEX
state = RmState::Regular; // TODO: check if this assignment is necessary
state = State::Regular; // TODO: check if this assignment is necessary
timeForNextTrigger = sc_time_stamp() + memSpec->getRefreshIntervalAB();
sleeping = false;
}
else
{
if (state == RmState::Pulledin)
if (state == State::Pulledin)
flexibilityCounter--;
else
state = RmState::Pulledin;
state = State::Pulledin;
if (flexibilityCounter == maxPulledin)
{
state = RmState::Regular;
state = State::Regular;
timeForNextTrigger += memSpec->getRefreshIntervalAB();
}
}

View File

@@ -51,7 +51,7 @@ public:
virtual void updateState(Command) override;
private:
enum class RmState {Regular, Pulledin} state = RmState::Regular;
enum class State {Regular, Pulledin} state = State::Regular;
const MemSpec *memSpec;
std::vector<BankMachine *> &bankMachinesOnRank;
PowerDownManagerIF *powerDownManager;

View File

@@ -80,7 +80,7 @@ tlm_generic_payload *SchedulerFrFcfs::getNextRequest(BankMachine *bankMachine) c
unsigned bankID = bankMachine->getBank().ID();
if (!buffer[bankID].empty())
{
if (bankMachine->getState() == BmState::Activated)
if (bankMachine->getState() == BankMachine::State::Activated)
{
// Search for row hit
Row openRow = bankMachine->getOpenRow();

View File

@@ -81,7 +81,7 @@ tlm_generic_payload *SchedulerFrFcfsGrp::getNextRequest(BankMachine *bankMachine
unsigned bankID = bankMachine->getBank().ID();
if (!buffer[bankID].empty())
{
if (bankMachine->getState() == BmState::Activated)
if (bankMachine->getState() == BankMachine::State::Activated)
{
// Filter all row hits
Row openRow = bankMachine->getOpenRow();

View File

@@ -40,7 +40,6 @@
#include "../../common/dramExtensions.h"
#include "../../common/DebugManager.h"
enum class BmState;
class BankMachine;
class SchedulerIF

View File

@@ -33,6 +33,7 @@
* Robert Gernhardt
* Matthias Jung
* Eder F. Zulian
* Lukas Steiner
*/
#include "Arbiter.h"
@@ -42,56 +43,87 @@
using namespace tlm;
Arbiter::Arbiter(sc_module_name name, std::string pathToAddressMapping) :
sc_module(name), payloadEventQueue(this, &Arbiter::peqCallback)
sc_module(name), payloadEventQueue(this, &Arbiter::peqCallback),
maxActiveTransactions(Configuration::getInstance().maxActiveTransactions),
tCK(Configuration::getInstance().memSpec->tCK)
{
// The arbiter communicates with one or more memory units through one or more sockets (one or more memory channels).
// Each of the arbiter's initiator sockets is bound to a memory controller's target socket.
// Anytime a transaction comes from a memory unit to the arbiter the "bw" callback is called.
iSocket.register_nb_transport_bw(this, &Arbiter::nb_transport_bw);
for (size_t i = 0; i < Configuration::getInstance().memSpec->numberOfChannels; ++i)
{
channelIsBusy.push_back(false);
pendingRequests.push_back(std::queue<tlm_generic_payload *>());
nextChannelPayloadIDToAppend.push_back(0);
}
// One or more devices can access all the memory units through the arbiter.
// Devices' initiator sockets are bound to the arbiter's target sockets.
// As soon as the arbiter receives a request on any of its target sockets it should process and forward it to the proper memory channel.
tSocket.register_nb_transport_fw(this, &Arbiter::nb_transport_fw);
tSocket.register_transport_dbg(this, &Arbiter::transport_dbg);
addressDecoder = new AddressDecoder(pathToAddressMapping);
addressDecoder->print();
}
// Initiated by initiator side
// This function is called when an arbiter's target socket receives a transaction from a device
ArbiterFifo::ArbiterFifo(sc_module_name name, std::string pathToAddressMapping) :
Arbiter(name, pathToAddressMapping) {}
ArbiterReorder::ArbiterReorder(sc_module_name name, std::string pathToAddressMapping) :
Arbiter(name, pathToAddressMapping) {}
Arbiter::~Arbiter()
{
delete addressDecoder;
}
void Arbiter::end_of_elaboration()
{
for (unsigned i = 0; i < tSocket.size(); i++) // initiator side
{
threadIsBusy.push_back(false);
nextThreadPayloadIDToAppend.push_back(0);
activeTransactions.push_back(0);
outstandingEndReq.push_back(nullptr);
}
for (unsigned i = 0; i < iSocket.size(); i++) // channel side
{
channelIsBusy.push_back(false);
pendingRequests.push_back(std::queue<tlm_generic_payload *>());
nextChannelPayloadIDToAppend.push_back(0);
}
}
void ArbiterFifo::end_of_elaboration()
{
Arbiter::end_of_elaboration();
for (unsigned i = 0; i < tSocket.size(); i++) // initiator side
pendingResponses.push_back(std::queue<tlm_generic_payload *>());
}
void ArbiterReorder::end_of_elaboration()
{
Arbiter::end_of_elaboration();
for (unsigned i = 0; i < tSocket.size(); i++) // initiator side
{
pendingResponses.push_back(std::set<tlm_generic_payload *, ThreadPayloadIDCompare>());
nextThreadPayloadIDToReturn.push_back(0);
}
}
tlm_sync_enum Arbiter::nb_transport_fw(int id, tlm_generic_payload &payload,
tlm_phase &phase, sc_time &fwDelay)
{
sc_time notDelay = std::ceil((sc_time_stamp() + fwDelay) / Configuration::getInstance().memSpec->tCK)
* Configuration::getInstance().memSpec->tCK - sc_time_stamp();
sc_time notDelay = std::ceil((sc_time_stamp() + fwDelay) / tCK)
* tCK - sc_time_stamp();
if (phase == BEGIN_REQ)
{
// TODO: do not adjust address permanently
// adjust address offset:
payload.set_address(payload.get_address() -
Configuration::getInstance().addressOffset);
uint64_t adjustedAddress = payload.get_address() - Configuration::getInstance().addressOffset;
payload.set_address(adjustedAddress);
// In the begin request phase the socket ID is appended to the payload.
// It will be extracted from the payload and used later.
appendDramExtension(id, payload, fwDelay);
DecodedAddress decodedAddress = addressDecoder->decodeAddress(adjustedAddress);
DramExtension::setExtension(payload, Thread(static_cast<unsigned int>(id)),
Channel(decodedAddress.channel), Rank(decodedAddress.rank),
BankGroup(decodedAddress.bankgroup), Bank(decodedAddress.bank),
Row(decodedAddress.row), Column(decodedAddress.column),
payload.get_streaming_width(), 0, 0);
payload.acquire();
}
else if (phase == END_RESP)
{
// TODO: why one additional cycle?
notDelay += Configuration::getInstance().memSpec->tCK;
}
PRINTDEBUGMESSAGE(name(), "[fw] " + getPhaseName(phase) + " notification in " +
notDelay.to_string());
@@ -99,8 +131,6 @@ tlm_sync_enum Arbiter::nb_transport_fw(int id, tlm_generic_payload &payload,
return TLM_ACCEPTED;
}
// Initiated by dram side
// This function is called when an arbiter's initiator socket receives a transaction from a memory controller
tlm_sync_enum Arbiter::nb_transport_bw(int, tlm_generic_payload &payload,
tlm_phase &phase, sc_time &bwDelay)
{
@@ -112,7 +142,6 @@ tlm_sync_enum Arbiter::nb_transport_bw(int, tlm_generic_payload &payload,
unsigned int Arbiter::transport_dbg(int /*id*/, tlm::tlm_generic_payload &trans)
{
// adjust address offset:
trans.set_address(trans.get_address() -
Configuration::getInstance().addressOffset);
@@ -120,92 +149,116 @@ unsigned int Arbiter::transport_dbg(int /*id*/, tlm::tlm_generic_payload &trans)
return iSocket[static_cast<int>(decodedAddress.channel)]->transport_dbg(trans);
}
void Arbiter::peqCallback(tlm_generic_payload &payload, const tlm_phase &phase)
void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &cbPhase)
{
unsigned int threadId = DramExtension::getExtension(payload).getThread().ID();
unsigned int channelId = DramExtension::getExtension(payload).getChannel().ID();
unsigned int threadId = DramExtension::getExtension(cbPayload).getThread().ID();
unsigned int channelId = DramExtension::getExtension(cbPayload).getChannel().ID();
// Phases initiated by the initiator side from the arbiter's point of view (devices performing memory requests to the arbiter)
if (phase == BEGIN_REQ)
if (cbPhase == BEGIN_REQ) // from initiator
{
if (!channelIsBusy[channelId])
{
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(payload, tPhase, tDelay);
GenerationExtension::setExtension(cbPayload, sc_time_stamp());
DramExtension::setPayloadIDs(cbPayload,
nextThreadPayloadIDToAppend[threadId]++, nextChannelPayloadIDToAppend[channelId]++);
activeTransactions[threadId]++;
// This channel was available. Forward the new transaction to the memory controller.
channelIsBusy[channelId] = true;
}
else
{
// This channel is busy. Enqueue the new transaction which phase is BEGIN_REQ.
pendingRequests[channelId].push(&payload);
}
}
// Phases initiated by the target side from arbiter's point of view (memory side)
else if (phase == END_REQ)
{
// The arbiter receives a transaction which phase is END_REQ from memory controller and forwards it to the requester device.
if (activeTransactions[threadId] < maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(payload, tPhase, tDelay);
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
}
else
{
outstandingEndReq[threadId] = &cbPayload;
}
// This channel is now free! Dispatch a new transaction (phase is BEGIN_REQ) from the queue, if any. Send it to the memory controller.
if (!pendingRequests[channelId].empty())
pendingRequests[channelId].push(&cbPayload);
if (!channelIsBusy[channelId])
{
// Send ONE of the enqueued new transactions (phase is BEGIN_REQ) through this channel.
tlm_generic_payload &payloadToSend = *pendingRequests[channelId].front();
channelIsBusy[channelId] = true;
tlm_generic_payload *tPayload = pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(payloadToSend, tPhase, tDelay);
sc_time tDelay = tCK;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(*tPayload, tPhase, tDelay);
}
}
else if (cbPhase == END_REQ) // from memory controller
{
if (!pendingRequests[channelId].empty())
{
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
else
{
channelIsBusy[channelId] = false;
}
}
else if (phase == BEGIN_RESP)
else if (cbPhase == BEGIN_RESP) // from memory controller
{
if (!threadIsBusy[threadId])
{
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(payload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(payload, tPhase, tDelay);
threadIsBusy[threadId] = true;
}
else
{
pendingResponses[threadId].push(&payload);
}
}
else if (phase == END_RESP)
{
// Send the END_RESP message to the memory
// TODO: use early completion
{
tlm_phase tPhase = END_RESP;
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(payload, tPhase, tDelay);
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
// Drop one element of the queue of BEGIN_RESP from memory to this device
payload.release();
// Check if there are queued transactions with phase BEGIN_RESP from memory to this device
if (!pendingResponses[threadId].empty())
pendingResponses[threadId].push(&cbPayload);
if (!threadIsBusy[threadId])
{
// The queue is not empty.
tlm_generic_payload &payloadToSend = *pendingResponses[threadId].front();
threadIsBusy[threadId] = true;
tlm_generic_payload *tPayload = pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(payloadToSend, tPhase, tDelay);
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(payloadToSend, tPhase, tDelay);
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
}
else if (cbPhase == END_RESP) // from initiator
{
cbPayload.release();
if (!pendingResponses[threadId].empty())
{
tlm_generic_payload *tPayload = pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
else
{
@@ -217,17 +270,131 @@ void Arbiter::peqCallback(tlm_generic_payload &payload, const tlm_phase &phase)
"Payload event queue in arbiter was triggered with unknown phase");
}
void Arbiter::appendDramExtension(int socketId, tlm_generic_payload &payload, sc_time delay)
void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &cbPhase)
{
// Set Generation Extension and DRAM Extension
GenerationExtension::setExtension(&payload, sc_time_stamp() + delay);
unsigned int threadId = DramExtension::getExtension(cbPayload).getThread().ID();
unsigned int channelId = DramExtension::getExtension(cbPayload).getChannel().ID();
unsigned int burstlength = payload.get_streaming_width();
DecodedAddress decodedAddress = addressDecoder->decodeAddress(payload.get_address());
DramExtension::setExtension(payload, Thread(static_cast<unsigned int>(socketId)),
Channel(decodedAddress.channel), Rank(decodedAddress.rank),
BankGroup(decodedAddress.bankgroup), Bank(decodedAddress.bank),
Row(decodedAddress.row), Column(decodedAddress.column),
burstlength, nextThreadPayloadIDToAppend[static_cast<unsigned int>(socketId)]++,
nextChannelPayloadIDToAppend[decodedAddress.channel]++);
if (cbPhase == BEGIN_REQ) // from initiator
{
GenerationExtension::setExtension(cbPayload, sc_time_stamp());
DramExtension::setPayloadIDs(cbPayload,
nextThreadPayloadIDToAppend[threadId]++, nextChannelPayloadIDToAppend[channelId]++);
activeTransactions[threadId]++;
if (activeTransactions[threadId] < maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
}
else
{
outstandingEndReq[threadId] = &cbPayload;
}
pendingRequests[channelId].push(&cbPayload);
if (!channelIsBusy[channelId])
{
channelIsBusy[channelId] = true;
tlm_generic_payload *tPayload = pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(*tPayload, tPhase, tDelay);
}
}
else if (cbPhase == END_REQ) // from memory controller
{
if (!pendingRequests[channelId].empty())
{
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
else
{
channelIsBusy[channelId] = false;
}
}
else if (cbPhase == BEGIN_RESP) // from memory controller
{
// TODO: use early completion
{
tlm_phase tPhase = END_RESP;
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
pendingResponses[threadId].insert(&cbPayload);
if (!threadIsBusy[threadId])
{
tlm_generic_payload *tPayload = *pendingResponses[threadId].begin();
if (DramExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
{
nextThreadPayloadIDToReturn[threadId]++;
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
threadIsBusy[threadId] = true;
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
}
}
else if (cbPhase == END_RESP) // from initiator
{
cbPayload.release();
tlm_generic_payload *tPayload = *pendingResponses[threadId].begin();
if (!pendingResponses[threadId].empty() &&
DramExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
{
nextThreadPayloadIDToReturn[threadId]++;
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
else
{
threadIsBusy[threadId] = false;
}
}
else
SC_REPORT_FATAL(0,
"Payload event queue in arbiter was triggered with unknown phase");
}

View File

@@ -33,6 +33,7 @@
* Robert Gernhardt
* Matthias Jung
* Eder F. Zulian
* Lukas Steiner
*/
#ifndef ARBITER_H
@@ -43,7 +44,7 @@
#include <iostream>
#include <vector>
#include <queue>
#include <unordered_map>
#include <set>
#include <tlm_utils/multi_passthrough_target_socket.h>
#include <tlm_utils/multi_passthrough_initiator_socket.h>
#include <tlm_utils/peq_with_cb_and_phase.h>
@@ -56,41 +57,75 @@ public:
tlm_utils::multi_passthrough_initiator_socket<Arbiter> iSocket;
tlm_utils::multi_passthrough_target_socket<Arbiter> tSocket;
virtual ~Arbiter() override;
protected:
Arbiter(sc_module_name, std::string);
SC_HAS_PROCESS(Arbiter);
private:
virtual void end_of_elaboration() override;
AddressDecoder *addressDecoder;
tlm_utils::peq_with_cb_and_phase<Arbiter> payloadEventQueue;
virtual void peqCallback(tlm::tlm_generic_payload &payload, const tlm::tlm_phase &phase) = 0;
std::vector<bool> threadIsBusy;
std::vector<bool> channelIsBusy;
// used to account for the request_accept_delay in the dram controllers
// This is a queue of new transactions. The phase of a new request is BEGIN_REQ.
std::vector<std::queue<tlm::tlm_generic_payload *>> pendingRequests;
// used to account for the response_accept_delay in the initiators (traceplayer, core etc.)
// This is a queue of responses coming from the memory side. The phase of these transactions is BEGIN_RESP.
std::unordered_map<unsigned int, std::queue<tlm::tlm_generic_payload *>> pendingResponses;
std::unordered_map<unsigned int, bool> threadIsBusy;
// Initiated by initiator side
// This function is called when an arbiter's target socket receives a transaction from a device
std::vector<uint64_t> nextThreadPayloadIDToAppend;
std::vector<uint64_t> nextChannelPayloadIDToAppend;
std::vector<unsigned int> activeTransactions;
const unsigned maxActiveTransactions;
std::vector<tlm::tlm_generic_payload *> outstandingEndReq;
tlm::tlm_sync_enum nb_transport_fw(int id, tlm::tlm_generic_payload &payload,
tlm::tlm_phase &phase, sc_time &fwDelay);
// Initiated by dram side
// This function is called when an arbiter's initiator socket receives a transaction from a memory controller
tlm::tlm_sync_enum nb_transport_bw(int, tlm::tlm_generic_payload &payload,
tlm::tlm_phase &phase, sc_time &bwDelay);
unsigned int transport_dbg(int /*id*/, tlm::tlm_generic_payload &trans);
void peqCallback(tlm::tlm_generic_payload &payload, const tlm::tlm_phase &phase);
sc_time tCK;
};
void appendDramExtension(int socketId, tlm::tlm_generic_payload &payload, sc_time delay);
std::vector<uint64_t> nextChannelPayloadIDToAppend;
std::unordered_map<unsigned int, uint64_t> nextThreadPayloadIDToAppend;
// FIFO arbiter: responses are returned to each initiator thread in the order
// they arrived from the memory side (per-thread queues, no reordering).
class ArbiterFifo final : public Arbiter
{
public:
ArbiterFifo(sc_module_name, std::string);
SC_HAS_PROCESS(ArbiterFifo);
virtual ~ArbiterFifo() override {}
private:
virtual void end_of_elaboration() override;
// Handles TLM phase transitions delivered through the arbiter's payload event queue.
virtual void peqCallback(tlm::tlm_generic_payload &payload, const tlm::tlm_phase &phase) override;
// Per-thread FIFO of responses (BEGIN_RESP payloads) awaiting return to the initiator.
std::vector<std::queue<tlm::tlm_generic_payload *>> pendingResponses;
};
// Reordering arbiter: responses are returned to each initiator thread in
// request-issue order (ascending thread payload ID), even when the memory
// controllers complete them out of order.
class ArbiterReorder final : public Arbiter
{
public:
ArbiterReorder(sc_module_name, std::string);
SC_HAS_PROCESS(ArbiterReorder);
virtual ~ArbiterReorder() override {}
private:
virtual void end_of_elaboration() override;
// Handles TLM phase transitions delivered through the arbiter's payload event queue.
virtual void peqCallback(tlm::tlm_generic_payload &payload, const tlm::tlm_phase &phase) override;
// Strict weak ordering by thread payload ID; keeps pendingResponses sorted in issue order.
struct ThreadPayloadIDCompare
{
bool operator() (const tlm::tlm_generic_payload *lhs, const tlm::tlm_generic_payload *rhs) const
{
return DramExtension::getThreadPayloadID(lhs) < DramExtension::getThreadPayloadID(rhs);
}
};
// Per-thread set of completed-but-not-yet-returned responses, ordered by payload ID.
std::vector<std::set<tlm::tlm_generic_payload*, ThreadPayloadIDCompare>> pendingResponses;
// Next thread payload ID expected to be returned to each initiator thread.
std::vector<uint64_t> nextThreadPayloadIDToReturn;
};
#endif // ARBITER_H

View File

@@ -196,7 +196,10 @@ void DRAMSys::instantiateModules(const std::string &pathToResources,
config.pECC = ecc;
// Create arbiter
arbiter = new Arbiter("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
if (config.arbiter == Configuration::Arbiter::Fifo)
arbiter = new ArbiterFifo("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
else if (config.arbiter == Configuration::Arbiter::Reorder)
arbiter = new ArbiterReorder("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
// Create controllers and DRAMs
MemSpec::MemoryType memoryType = config.memSpec->memoryType;

View File

@@ -137,7 +137,10 @@ void DRAMSysRecordable::instantiateModules(const std::string &traceName,
config.pECC = ecc;
// Create arbiter
arbiter = new Arbiter("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
if (config.arbiter == Configuration::Arbiter::Fifo)
arbiter = new ArbiterFifo("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
else if (config.arbiter == Configuration::Arbiter::Reorder)
arbiter = new ArbiterReorder("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
// Create controllers and DRAMs
MemSpec::MemoryType memoryType = config.memSpec->memoryType;