Add reorder arbiter.

This commit is contained in:
Lukas Steiner
2020-10-28 11:18:13 +01:00
parent fe1d8eafdd
commit d2878c62f2
6 changed files with 261 additions and 69 deletions

View File

@@ -137,6 +137,15 @@ void Configuration::setParameter(std::string name, nlohmann::json value)
else
SC_REPORT_FATAL("Configuration", "Unsupported response queue!");
}
else if (name == "Arbiter")
{
if (value == "Fifo")
arbiter = Arbiter::Fifo;
else if (value == "Reorder")
arbiter = Arbiter::Reorder;
else
SC_REPORT_FATAL("Configuration", "Unsupported arbiter!");
}
else if (name == "RefreshPolicy")
{
if (value == "NoRefresh")

View File

@@ -74,6 +74,7 @@ public:
enum class SchedulerBuffer {Bankwise, ReadWrite} schedulerBuffer;
enum class CmdMux {Oldest, Strict} cmdMux;
enum class RespQueue {Fifo, Reorder} respQueue;
enum class Arbiter {Fifo, Reorder} arbiter;
unsigned int requestBufferSize = 8;
enum class RefreshPolicy {NoRefresh, Rankwise, Bankwise} refreshPolicy;
unsigned int refreshMaxPostponed = 0;

View File

@@ -33,6 +33,7 @@
* Robert Gernhardt
* Matthias Jung
* Eder F. Zulian
* Lukas Steiner
*/
#include "Arbiter.h"
@@ -54,6 +55,12 @@ Arbiter::Arbiter(sc_module_name name, std::string pathToAddressMapping) :
addressDecoder->print();
}
// FIFO arbiter: simply hands the module name and the path to the
// address-mapping config through to the common Arbiter base.
ArbiterFifo::ArbiterFifo(sc_module_name name, std::string pathToAddressMapping)
    : Arbiter(name, pathToAddressMapping)
{
}
// Reorder arbiter: simply hands the module name and the path to the
// address-mapping config through to the common Arbiter base.
ArbiterReorder::ArbiterReorder(sc_module_name name, std::string pathToAddressMapping)
    : Arbiter(name, pathToAddressMapping)
{
}
Arbiter::~Arbiter()
{
delete addressDecoder;
@@ -64,7 +71,6 @@ void Arbiter::end_of_elaboration()
for (unsigned i = 0; i < tSocket.size(); i++) // initiator side
{
threadIsBusy.push_back(false);
pendingResponses.push_back(std::queue<tlm_generic_payload *>());
nextThreadPayloadIDToAppend.push_back(0);
activeTransactions.push_back(0);
outstandingEndReq.push_back(nullptr);
@@ -78,6 +84,40 @@ void Arbiter::end_of_elaboration()
}
}
void ArbiterFifo::end_of_elaboration()
{
    // Let the base class set up the shared per-thread/per-channel state first.
    Arbiter::end_of_elaboration();

    // One FIFO response queue per initiator-side socket (i.e. per thread).
    for (unsigned threadId = 0; threadId < tSocket.size(); threadId++)
        pendingResponses.emplace_back();
}
void ArbiterReorder::end_of_elaboration()
{
    // Let the base class set up the shared per-thread/per-channel state first.
    Arbiter::end_of_elaboration();

    // Per initiator-side socket (thread): an ordered set of pending responses
    // plus the payload ID that has to be returned to that thread next.
    for (unsigned threadId = 0; threadId < tSocket.size(); threadId++)
    {
        pendingResponses.emplace_back();
        nextThreadPayloadIDToReturn.push_back(0);
    }
}
void Arbiter::appendDramExtension(int socketId, tlm_generic_payload &payload, sc_time delay)
{
// Set Generation Extension and DRAM Extension
GenerationExtension::setExtension(&payload, sc_time_stamp() + delay);
unsigned int burstlength = payload.get_streaming_width();
DecodedAddress decodedAddress = addressDecoder->decodeAddress(payload.get_address());
DramExtension::setExtension(payload, Thread(static_cast<unsigned int>(socketId)),
Channel(decodedAddress.channel), Rank(decodedAddress.rank),
BankGroup(decodedAddress.bankgroup), Bank(decodedAddress.bank),
Row(decodedAddress.row), Column(decodedAddress.column),
burstlength, nextThreadPayloadIDToAppend[static_cast<unsigned int>(socketId)]++,
nextChannelPayloadIDToAppend[decodedAddress.channel]++);
}
tlm_sync_enum Arbiter::nb_transport_fw(int id, tlm_generic_payload &payload,
tlm_phase &phase, sc_time &fwDelay)
{
@@ -119,12 +159,12 @@ unsigned int Arbiter::transport_dbg(int /*id*/, tlm::tlm_generic_payload &trans)
return iSocket[static_cast<int>(decodedAddress.channel)]->transport_dbg(trans);
}
void Arbiter::peqCallback(tlm_generic_payload &payload, const tlm_phase &phase)
void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &cbPhase)
{
unsigned int threadId = DramExtension::getExtension(payload).getThread().ID();
unsigned int channelId = DramExtension::getExtension(payload).getChannel().ID();
unsigned int threadId = DramExtension::getExtension(cbPayload).getThread().ID();
unsigned int channelId = DramExtension::getExtension(cbPayload).getChannel().ID();
if (phase == BEGIN_REQ) // from initiator
if (cbPhase == BEGIN_REQ) // from initiator
{
activeTransactions[threadId]++;
@@ -132,38 +172,28 @@ void Arbiter::peqCallback(tlm_generic_payload &payload, const tlm_phase &phase)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(payload, tPhase, tDelay);
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
}
else
{
outstandingEndReq[threadId] = &payload;
outstandingEndReq[threadId] = &cbPayload;
}
pendingRequests[channelId].push(&cbPayload);
if (!channelIsBusy[channelId])
{
channelIsBusy[channelId] = true;
tlm_generic_payload *tPayload = pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
if (pendingRequests[channelId].empty())
{
iSocket[static_cast<int>(channelId)]->nb_transport_fw(payload, tPhase, tDelay);
}
else
{
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
pendingRequests[channelId].push(&payload);
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
}
else
{
pendingRequests[channelId].push(&payload);
iSocket[static_cast<int>(channelId)]->nb_transport_fw(*tPayload, tPhase, tDelay);
}
}
else if (phase == END_REQ) // from memory controller
else if (cbPhase == END_REQ) // from memory controller
{
if (!pendingRequests[channelId].empty())
{
@@ -178,66 +208,64 @@ void Arbiter::peqCallback(tlm_generic_payload &payload, const tlm_phase &phase)
channelIsBusy[channelId] = false;
}
}
else if (phase == BEGIN_RESP) // from memory controller
else if (cbPhase == BEGIN_RESP) // from memory controller
{
// TODO: use early completion
{
tlm_phase tPhase = END_RESP;
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(payload, tPhase, tDelay);
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
pendingResponses[threadId].push(&cbPayload);
if (!threadIsBusy[threadId])
{
threadIsBusy[threadId] = true;
tlm_generic_payload *tPayload = pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
if (pendingResponses[threadId].empty())
{
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(payload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(payload, tPhase, tDelay);
}
else
{
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
pendingResponses[threadId].pop();
pendingResponses[threadId].push(&payload);
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tlm_generic_payload &tPayload = *outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
else
{
pendingResponses[threadId].push(&payload);
}
}
else if (phase == END_RESP) // from initiator
else if (cbPhase == END_RESP) // from initiator
{
payload.release();
cbPayload.release();
if (!pendingResponses[threadId].empty())
{
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
tlm_generic_payload *tPayload = pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
else
{
@@ -249,17 +277,128 @@ void Arbiter::peqCallback(tlm_generic_payload &payload, const tlm_phase &phase)
"Payload event queue in arbiter was triggered with unknown phase");
}
void Arbiter::appendDramExtension(int socketId, tlm_generic_payload &payload, sc_time delay)
void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &cbPhase)
{
// Set Generation Extension and DRAM Extension
GenerationExtension::setExtension(&payload, sc_time_stamp() + delay);
unsigned int threadId = DramExtension::getExtension(cbPayload).getThread().ID();
unsigned int channelId = DramExtension::getExtension(cbPayload).getChannel().ID();
unsigned int burstlength = payload.get_streaming_width();
DecodedAddress decodedAddress = addressDecoder->decodeAddress(payload.get_address());
DramExtension::setExtension(payload, Thread(static_cast<unsigned int>(socketId)),
Channel(decodedAddress.channel), Rank(decodedAddress.rank),
BankGroup(decodedAddress.bankgroup), Bank(decodedAddress.bank),
Row(decodedAddress.row), Column(decodedAddress.column),
burstlength, nextThreadPayloadIDToAppend[static_cast<unsigned int>(socketId)]++,
nextChannelPayloadIDToAppend[decodedAddress.channel]++);
if (cbPhase == BEGIN_REQ) // from initiator
{
activeTransactions[threadId]++;
if (activeTransactions[threadId] < maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
}
else
{
outstandingEndReq[threadId] = &cbPayload;
}
pendingRequests[channelId].push(&cbPayload);
if (!channelIsBusy[channelId])
{
channelIsBusy[channelId] = true;
tlm_generic_payload *tPayload = pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(*tPayload, tPhase, tDelay);
}
}
else if (cbPhase == END_REQ) // from memory controller
{
if (!pendingRequests[channelId].empty())
{
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
else
{
channelIsBusy[channelId] = false;
}
}
else if (cbPhase == BEGIN_RESP) // from memory controller
{
// TODO: use early completion
{
tlm_phase tPhase = END_RESP;
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
pendingResponses[threadId].insert(&cbPayload);
if (!threadIsBusy[threadId])
{
tlm_generic_payload *tPayload = *pendingResponses[threadId].begin();
if (DramExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
{
nextThreadPayloadIDToReturn[threadId]++;
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
threadIsBusy[threadId] = true;
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
}
}
else if (cbPhase == END_RESP) // from initiator
{
cbPayload.release();
tlm_generic_payload *tPayload = *pendingResponses[threadId].begin();
if (!pendingResponses[threadId].empty() &&
DramExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
{
nextThreadPayloadIDToReturn[threadId]++;
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
else
{
threadIsBusy[threadId] = false;
}
}
else
SC_REPORT_FATAL(0,
"Payload event queue in arbiter was triggered with unknown phase");
}

View File

@@ -33,6 +33,7 @@
* Robert Gernhardt
* Matthias Jung
* Eder F. Zulian
* Lukas Steiner
*/
#ifndef ARBITER_H
@@ -43,6 +44,7 @@
#include <iostream>
#include <vector>
#include <queue>
#include <set>
#include <tlm_utils/multi_passthrough_target_socket.h>
#include <tlm_utils/multi_passthrough_initiator_socket.h>
#include <tlm_utils/peq_with_cb_and_phase.h>
@@ -55,23 +57,22 @@ public:
tlm_utils::multi_passthrough_initiator_socket<Arbiter> iSocket;
tlm_utils::multi_passthrough_target_socket<Arbiter> tSocket;
virtual ~Arbiter() override;
protected:
Arbiter(sc_module_name, std::string);
SC_HAS_PROCESS(Arbiter);
virtual ~Arbiter() override;
private:
virtual void end_of_elaboration() override;
AddressDecoder *addressDecoder;
tlm_utils::peq_with_cb_and_phase<Arbiter> payloadEventQueue;
void peqCallback(tlm::tlm_generic_payload &payload, const tlm::tlm_phase &phase);
virtual void peqCallback(tlm::tlm_generic_payload &payload, const tlm::tlm_phase &phase) = 0;
std::vector<bool> threadIsBusy;
std::vector<bool> channelIsBusy;
std::vector<std::queue<tlm::tlm_generic_payload *>> pendingResponses;
std::vector<std::queue<tlm::tlm_generic_payload *>> pendingRequests;
std::vector<uint64_t> nextThreadPayloadIDToAppend;
@@ -93,4 +94,40 @@ private:
sc_time tCK;
};
class ArbiterFifo final : public Arbiter
{
public:
ArbiterFifo(sc_module_name, std::string);
SC_HAS_PROCESS(ArbiterFifo);
virtual ~ArbiterFifo() override {}
private:
virtual void end_of_elaboration() override;
virtual void peqCallback(tlm::tlm_generic_payload &payload, const tlm::tlm_phase &phase) override;
std::vector<std::queue<tlm::tlm_generic_payload *>> pendingResponses;
};
class ArbiterReorder final : public Arbiter
{
public:
ArbiterReorder(sc_module_name, std::string);
SC_HAS_PROCESS(ArbiterReorder);
virtual ~ArbiterReorder() override {}
private:
virtual void end_of_elaboration() override;
virtual void peqCallback(tlm::tlm_generic_payload &payload, const tlm::tlm_phase &phase) override;
struct ThreadPayloadIDCompare
{
bool operator() (const tlm::tlm_generic_payload *lhs, const tlm::tlm_generic_payload *rhs) const
{
return DramExtension::getThreadPayloadID(lhs) < DramExtension::getThreadPayloadID(rhs);
}
};
std::vector<std::set<tlm::tlm_generic_payload*, ThreadPayloadIDCompare>> pendingResponses;
std::vector<uint64_t> nextThreadPayloadIDToReturn;
};
#endif // ARBITER_H

View File

@@ -195,7 +195,10 @@ void DRAMSys::instantiateModules(const std::string &pathToResources,
config.pECC = ecc;
// Create arbiter
arbiter = new Arbiter("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
if (config.arbiter == Configuration::Arbiter::Fifo)
arbiter = new ArbiterFifo("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
else if (config.arbiter == Configuration::Arbiter::Reorder)
arbiter = new ArbiterReorder("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
// Create controllers and DRAMs
MemSpec::MemoryType memoryType = config.memSpec->memoryType;

View File

@@ -136,7 +136,10 @@ void DRAMSysRecordable::instantiateModules(const std::string &traceName,
config.pECC = ecc;
// Create arbiter
arbiter = new Arbiter("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
if (config.arbiter == Configuration::Arbiter::Fifo)
arbiter = new ArbiterFifo("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
else if (config.arbiter == Configuration::Arbiter::Reorder)
arbiter = new ArbiterReorder("arbiter", pathToResources + "configs/amconfigs/" + amconfig);
// Create controllers and DRAMs
MemSpec::MemoryType memoryType = config.memSpec->memoryType;