Use scoped enums for DRAM types.
This commit is contained in:
@@ -126,13 +126,15 @@ CheckerDDR5::CheckerDDR5(const Configuration& config)
|
||||
|
||||
sc_time CheckerDDR5::timeToSatisfyConstraints(Command command, const tlm_generic_payload& payload) const
|
||||
{
|
||||
LogicalRank logicalRank(ControllerExtension::getRank(payload).ID());
|
||||
PhysicalRank physicalRank(logicalRank.ID() / memSpec->logicalRanksPerPhysicalRank);
|
||||
DimmRank dimmRank(physicalRank.ID() / memSpec->physicalRanksPerDimmRank);
|
||||
LogicalRank logicalRank = LogicalRank(ControllerExtension::getRank(payload));
|
||||
PhysicalRank physicalRank = PhysicalRank(
|
||||
static_cast<std::size_t>(logicalRank) / memSpec->logicalRanksPerPhysicalRank);
|
||||
DimmRank dimmRank = DimmRank(static_cast<std::size_t>(physicalRank) / memSpec->physicalRanksPerDimmRank);
|
||||
BankGroup bankGroup = ControllerExtension::getBankGroup(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
Bank bankInGroup = Bank(logicalRank.ID() * memSpec->banksPerGroup + bank.ID() % memSpec->banksPerGroup);
|
||||
|
||||
Bank bankInGroup = Bank(static_cast<std::size_t>(logicalRank) * memSpec->banksPerGroup
|
||||
+ static_cast<std::size_t>(bank) % memSpec->banksPerGroup);
|
||||
|
||||
sc_time lastCommandStart;
|
||||
sc_time earliestTimeToStart = sc_time_stamp();
|
||||
|
||||
@@ -938,15 +940,17 @@ sc_time CheckerDDR5::timeToSatisfyConstraints(Command command, const tlm_generic
|
||||
|
||||
void CheckerDDR5::insert(Command command, const tlm_generic_payload& payload)
|
||||
{
|
||||
LogicalRank logicalRank(ControllerExtension::getRank(payload).ID());
|
||||
PhysicalRank physicalRank(logicalRank.ID() / memSpec->logicalRanksPerPhysicalRank);
|
||||
DimmRank dimmRank(physicalRank.ID() / memSpec->physicalRanksPerDimmRank);
|
||||
LogicalRank logicalRank = LogicalRank(ControllerExtension::getRank(payload));
|
||||
PhysicalRank physicalRank = PhysicalRank(
|
||||
static_cast<std::size_t>(logicalRank) / memSpec->logicalRanksPerPhysicalRank);
|
||||
DimmRank dimmRank = DimmRank(static_cast<std::size_t>(physicalRank) / memSpec->physicalRanksPerDimmRank);
|
||||
BankGroup bankGroup = ControllerExtension::getBankGroup(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
Bank bankInGroup = Bank(logicalRank.ID() * memSpec->banksPerGroup + bank.ID() % memSpec->banksPerGroup);
|
||||
Bank bankInGroup = Bank(static_cast<std::size_t>(logicalRank) * memSpec->banksPerGroup
|
||||
+ static_cast<std::size_t>(bank) % memSpec->banksPerGroup);
|
||||
unsigned burstLength = ControllerExtension::getBurstLength(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerDDR5", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerDDR5", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndDimmRank[command][dimmRank] = sc_time_stamp();
|
||||
|
||||
@@ -632,7 +632,7 @@ sc_time CheckerLPDDR5::timeToSatisfyConstraints(Command command, const tlm_gener
|
||||
}
|
||||
else if (command == Command::REFP2B)
|
||||
{
|
||||
Bank secondBank = Bank(bank.ID() + memSpec->getPer2BankOffset());
|
||||
Bank secondBank = Bank(static_cast<std::size_t>(bank) + memSpec->getPer2BankOffset());
|
||||
|
||||
lastCommandStart = lastScheduledByCommandAndBank[Command::ACT][bank];
|
||||
if (lastCommandStart != scMaxTime)
|
||||
@@ -730,7 +730,7 @@ void CheckerLPDDR5::insert(Command command, const tlm_generic_payload& payload)
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
unsigned burstLength = ControllerExtension::getBurstLength(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerLPDDR5", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerLPDDR5", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndRank[command][rank] = sc_time_stamp();
|
||||
@@ -748,7 +748,7 @@ void CheckerLPDDR5::insert(Command command, const tlm_generic_payload& payload)
|
||||
|
||||
if (command == Command::REFP2B)
|
||||
{
|
||||
Bank secondBank(bank.ID() + memSpec->getPer2BankOffset());
|
||||
Bank secondBank = Bank(static_cast<std::size_t>(bank) + memSpec->getPer2BankOffset());
|
||||
lastScheduledByCommandAndBank[command][secondBank] = sc_time_stamp();
|
||||
}
|
||||
|
||||
|
||||
@@ -450,9 +450,9 @@ void TlmRecorder::insertTransactionInDB(const Transaction &recordingData)
|
||||
sqlite3_bind_int64(insertTransactionStatement, 3, static_cast<int64_t>(recordingData.address));
|
||||
sqlite3_bind_int(insertTransactionStatement, 4, static_cast<int>(recordingData.dataLength));
|
||||
sqlite3_bind_int(insertTransactionStatement, 5,
|
||||
static_cast<int>(recordingData.thread.ID()));
|
||||
static_cast<int>(recordingData.thread));
|
||||
sqlite3_bind_int(insertTransactionStatement, 6,
|
||||
static_cast<int>(recordingData.channel.ID()));
|
||||
static_cast<int>(recordingData.channel));
|
||||
sqlite3_bind_int64(insertTransactionStatement, 7,
|
||||
static_cast<int64_t>(recordingData.timeOfGeneration.value()));
|
||||
sqlite3_bind_text(insertTransactionStatement, 8,
|
||||
@@ -477,11 +477,11 @@ void TlmRecorder::insertPhaseInDB(const Transaction::Phase& phase, uint64_t tran
|
||||
sqlite3_bind_int64(insertPhaseStatement, 3, static_cast<int64_t>(phase.interval.end.value()));
|
||||
sqlite3_bind_int64(insertPhaseStatement, 4, static_cast<int64_t>(phase.intervalOnDataStrobe.start.value()));
|
||||
sqlite3_bind_int64(insertPhaseStatement, 5, static_cast<int64_t>(phase.intervalOnDataStrobe.end.value()));
|
||||
sqlite3_bind_int(insertPhaseStatement, 6, static_cast<int>(phase.rank.ID()));
|
||||
sqlite3_bind_int(insertPhaseStatement, 7, static_cast<int>(phase.bankGroup.ID()));
|
||||
sqlite3_bind_int(insertPhaseStatement, 8, static_cast<int>(phase.bank.ID()));
|
||||
sqlite3_bind_int(insertPhaseStatement, 9, static_cast<int>(phase.row.ID()));
|
||||
sqlite3_bind_int(insertPhaseStatement, 10, static_cast<int>(phase.column.ID()));
|
||||
sqlite3_bind_int(insertPhaseStatement, 6, static_cast<int>(phase.rank));
|
||||
sqlite3_bind_int(insertPhaseStatement, 7, static_cast<int>(phase.bankGroup));
|
||||
sqlite3_bind_int(insertPhaseStatement, 8, static_cast<int>(phase.bank));
|
||||
sqlite3_bind_int(insertPhaseStatement, 9, static_cast<int>(phase.row));
|
||||
sqlite3_bind_int(insertPhaseStatement, 10, static_cast<int>(phase.column));
|
||||
sqlite3_bind_int(insertPhaseStatement, 11, static_cast<int>(phase.burstLength));
|
||||
sqlite3_bind_int64(insertPhaseStatement, 12, static_cast<int64_t>(transactionID));
|
||||
executeSqlStatement(insertPhaseStatement);
|
||||
|
||||
@@ -272,94 +272,6 @@ unsigned ControllerExtension::getBurstLength(const tlm::tlm_generic_payload& tra
|
||||
return trans.get_extension<ControllerExtension>()->burstLength;
|
||||
}
|
||||
|
||||
//THREAD
|
||||
bool operator ==(const Thread &lhs, const Thread &rhs)
|
||||
{
|
||||
return lhs.ID() == rhs.ID();
|
||||
}
|
||||
|
||||
bool operator !=(const Thread &lhs, const Thread &rhs)
|
||||
{
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
bool operator <(const Thread &lhs, const Thread &rhs)
|
||||
{
|
||||
return lhs.ID() < rhs.ID();
|
||||
}
|
||||
|
||||
//CHANNEL
|
||||
bool operator ==(const Channel &lhs, const Channel &rhs)
|
||||
{
|
||||
return lhs.ID() == rhs.ID();
|
||||
}
|
||||
|
||||
bool operator !=(const Channel &lhs, const Channel &rhs)
|
||||
{
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
//RANK
|
||||
bool operator ==(const Rank &lhs, const Rank &rhs)
|
||||
{
|
||||
return lhs.ID() == rhs.ID();
|
||||
}
|
||||
|
||||
bool operator !=(const Rank &lhs, const Rank &rhs)
|
||||
{
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
//BANKGROUP
|
||||
bool operator ==(const BankGroup &lhs, const BankGroup &rhs)
|
||||
{
|
||||
return lhs.ID() == rhs.ID();
|
||||
}
|
||||
|
||||
bool operator !=(const BankGroup &lhs, const BankGroup &rhs)
|
||||
{
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
//BANK
|
||||
bool operator ==(const Bank &lhs, const Bank &rhs)
|
||||
{
|
||||
return lhs.ID() == rhs.ID();
|
||||
}
|
||||
|
||||
bool operator !=(const Bank &lhs, const Bank &rhs)
|
||||
{
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
bool operator <(const Bank &lhs, const Bank &rhs)
|
||||
{
|
||||
return lhs.ID() < rhs.ID();
|
||||
}
|
||||
|
||||
//ROW
|
||||
bool operator ==(const Row &lhs, const Row &rhs)
|
||||
{
|
||||
return lhs.ID() == rhs.ID();
|
||||
}
|
||||
|
||||
bool operator !=(const Row &lhs, const Row &rhs)
|
||||
{
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
//COLUMN
|
||||
bool operator ==(const Column &lhs, const Column &rhs)
|
||||
{
|
||||
return lhs.ID() == rhs.ID();
|
||||
}
|
||||
|
||||
bool operator !=(const Column &lhs, const Column &rhs)
|
||||
{
|
||||
return !(lhs == rhs);
|
||||
}
|
||||
|
||||
|
||||
tlm::tlm_extension_base* ChildExtension::clone() const
|
||||
{
|
||||
return new ChildExtension(*parentTrans);
|
||||
|
||||
@@ -46,167 +46,35 @@
|
||||
namespace DRAMSys
|
||||
{
|
||||
|
||||
// Strongly-typed wrapper around a numeric thread identifier, so that thread
// IDs cannot be mixed up with other index kinds.
class Thread
{
public:
    explicit Thread(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
// Strongly-typed wrapper around a numeric channel identifier.
class Channel
{
public:
    explicit Channel(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
// Strongly-typed wrapper around a numeric rank identifier.
class Rank
{
public:
    explicit Rank(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
// Strongly-typed wrapper around a numeric logical-rank identifier.
class LogicalRank
{
public:
    explicit LogicalRank(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
|
||||
// Strongly-typed wrapper around a numeric physical-rank identifier.
class PhysicalRank
{
public:
    explicit PhysicalRank(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
// Strongly-typed wrapper around a numeric DIMM-rank identifier.
class DimmRank
{
public:
    explicit DimmRank(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
// Strongly-typed wrapper around a numeric bank-group identifier.
class BankGroup
{
public:
    explicit BankGroup(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
// Strongly-typed wrapper around a numeric bank identifier.
class Bank
{
public:
    explicit Bank(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
// Strongly-typed wrapper around a numeric row identifier.
class Row
{
public:
    explicit Row(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
|
||||
// Strongly-typed wrapper around a numeric column identifier.
class Column
{
public:
    explicit Column(std::size_t rawId) : id(rawId) {}

    /// Underlying numeric identifier.
    std::size_t ID() const
    {
        return id;
    }

private:
    std::size_t id;
};
|
||||
enum class Thread : std::size_t;
|
||||
enum class Channel : std::size_t;
|
||||
enum class Rank : std::size_t;
|
||||
enum class LogicalRank : std::size_t;
|
||||
enum class PhysicalRank : std::size_t;
|
||||
enum class DimmRank : std::size_t;
|
||||
enum class BankGroup : std::size_t;
|
||||
enum class Bank : std::size_t;
|
||||
enum class Row : std::size_t;
|
||||
enum class Column : std::size_t;
|
||||
|
||||
/// Vector indexed by a strongly-typed index (e.g. a scoped enum such as Bank
/// or Rank) instead of a raw std::size_t, preventing accidental cross-indexing.
///
/// NOTE(review): the original span carried diff residue — each operator[] had
/// two consecutive return statements (an unreachable legacy `.ID()` form after
/// the `static_cast` form). Resolved here to the static_cast form, which is the
/// only one valid for the scoped-enum index types.
template<typename IndexType, typename ValueType>
class ControllerVector : private std::vector<ValueType>
{
public:
    /// size elements, each copy-initialized from value.
    ControllerVector(std::size_t size, const ValueType& value) : std::vector<ValueType>(size, value) {}
    /// size value-initialized elements.
    explicit ControllerVector(std::size_t size) : std::vector<ValueType>(size) {}
    /// Empty vector.
    ControllerVector() : std::vector<ValueType>() {}

    // Expose the remaining base constructors and the subset of the vector
    // interface the controllers use.
    using std::vector<ValueType>::vector;
    using std::vector<ValueType>::push_back;
    using std::vector<ValueType>::begin;
    using std::vector<ValueType>::end;
    using std::vector<ValueType>::front;

    /// Unchecked element access; the typed index is converted to the
    /// underlying position via static_cast.
    typename std::vector<ValueType>::const_reference operator[](IndexType index) const
    {
        return std::vector<ValueType>::operator[](static_cast<std::size_t>(index));
    }

    typename std::vector<ValueType>::reference operator[](IndexType index)
    {
        return std::vector<ValueType>::operator[](static_cast<std::size_t>(index));
    }
};
|
||||
|
||||
@@ -284,30 +152,6 @@ private:
|
||||
unsigned burstLength;
|
||||
};
|
||||
|
||||
|
||||
bool operator==(const Thread &lhs, const Thread &rhs);
|
||||
bool operator!=(const Thread &lhs, const Thread &rhs);
|
||||
bool operator<(const Thread &lhs, const Thread &rhs);
|
||||
|
||||
bool operator==(const Channel &lhs, const Channel &rhs);
|
||||
bool operator!=(const Channel &lhs, const Channel &rhs);
|
||||
|
||||
bool operator==(const Rank &lhs, const Rank &rhs);
|
||||
bool operator!=(const Rank &lhs, const Rank &rhs);
|
||||
|
||||
bool operator==(const BankGroup &lhs, const BankGroup &rhs);
|
||||
bool operator!=(const BankGroup &lhs, const BankGroup &rhs);
|
||||
|
||||
bool operator==(const Bank &lhs, const Bank &rhs);
|
||||
bool operator!=(const Bank &lhs, const Bank &rhs);
|
||||
bool operator<(const Bank &lhs, const Bank &rhs);
|
||||
|
||||
bool operator==(const Row &lhs, const Row &rhs);
|
||||
bool operator!=(const Row &lhs, const Row &rhs);
|
||||
|
||||
bool operator==(const Column &lhs, const Column &rhs);
|
||||
bool operator!=(const Column &lhs, const Column &rhs);
|
||||
|
||||
class ChildExtension : public tlm::tlm_extension<ChildExtension>
|
||||
{
|
||||
private:
|
||||
|
||||
@@ -46,7 +46,8 @@ namespace DRAMSys
|
||||
|
||||
BankMachine::BankMachine(const Configuration& config, const SchedulerIF& scheduler, Bank bank)
|
||||
: scheduler(scheduler), memSpec(*config.memSpec), bank(bank),
|
||||
bankgroup(BankGroup(bank.ID() / memSpec.banksPerGroup)), rank(Rank(bank.ID() / memSpec.banksPerRank)),
|
||||
bankgroup(BankGroup(static_cast<std::size_t>(bank) / memSpec.banksPerGroup)),
|
||||
rank(Rank(static_cast<std::size_t>(bank) / memSpec.banksPerRank)),
|
||||
refreshManagement(config.refreshManagement)
|
||||
{}
|
||||
|
||||
|
||||
@@ -281,7 +281,7 @@ void Controller::controllerMethod()
|
||||
for (unsigned rankID = 0; rankID < memSpec.ranksPerChannel; rankID++)
|
||||
{
|
||||
// (4.1) Check for power-down commands (PDEA/PDEP/SREFEN or PDXA/PDXP/SREFEX)
|
||||
Rank rank(rankID);
|
||||
Rank rank = Rank(rankID);
|
||||
commandTuple = powerDownManagers[rank]->getNextCommand();
|
||||
if (std::get<CommandTuple::Command>(commandTuple) != Command::NOP)
|
||||
readyCommands.emplace_back(commandTuple);
|
||||
@@ -327,14 +327,14 @@ void Controller::controllerMethod()
|
||||
}
|
||||
else if (command.isGroupCommand())
|
||||
{
|
||||
for (unsigned bankID = (bank.ID() % memSpec.banksPerGroup);
|
||||
for (std::size_t bankID = (static_cast<std::size_t>(bank) % memSpec.banksPerGroup);
|
||||
bankID < memSpec.banksPerRank; bankID += memSpec.banksPerGroup)
|
||||
bankMachinesOnRank[rank][Bank(bankID)]->update(command);
|
||||
}
|
||||
else if (command.is2BankCommand())
|
||||
{
|
||||
bankMachines[bank]->update(command);
|
||||
bankMachines[Bank(bank.ID() + memSpec.getPer2BankOffset())]->update(command);
|
||||
bankMachines[Bank(static_cast<std::size_t>(bank) + memSpec.getPer2BankOffset())]->update(command);
|
||||
}
|
||||
else // if (isBankCommand(command))
|
||||
bankMachines[bank]->update(command);
|
||||
|
||||
@@ -433,7 +433,7 @@ void CheckerDDR3::insert(Command command, const tlm_generic_payload& payload)
|
||||
Rank rank = ControllerExtension::getRank(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerDDR3", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerDDR3", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndRank[command][rank] = sc_time_stamp();
|
||||
|
||||
@@ -467,7 +467,7 @@ void CheckerDDR4::insert(Command command, const tlm_generic_payload& payload)
|
||||
BankGroup bankGroup = ControllerExtension::getBankGroup(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerDDR4", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerDDR4", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndBank[command][bank] = sc_time_stamp();
|
||||
|
||||
@@ -549,7 +549,7 @@ void CheckerGDDR5::insert(Command command, const tlm_generic_payload& payload)
|
||||
BankGroup bankGroup = ControllerExtension::getBankGroup(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerGDDR5", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerGDDR5", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndBank[command][bank] = sc_time_stamp();
|
||||
|
||||
@@ -553,7 +553,7 @@ void CheckerGDDR5X::insert(Command command, const tlm_generic_payload& payload)
|
||||
BankGroup bankGroup = ControllerExtension::getBankGroup(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerGDDR5X", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerGDDR5X", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndBank[command][bank] = sc_time_stamp();
|
||||
|
||||
@@ -570,7 +570,7 @@ void CheckerGDDR6::insert(Command command, const tlm_generic_payload& payload)
|
||||
BankGroup bankGroup = ControllerExtension::getBankGroup(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerGDDR6", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerGDDR6", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndBank[command][bank] = sc_time_stamp();
|
||||
|
||||
@@ -528,7 +528,7 @@ void CheckerHBM2::insert(Command command, const tlm_generic_payload& payload)
|
||||
BankGroup bankGroup = ControllerExtension::getBankGroup(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerHBM2", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerHBM2", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndBank[command][bank] = sc_time_stamp();
|
||||
|
||||
@@ -521,7 +521,7 @@ void CheckerLPDDR4::insert(Command command, const tlm_generic_payload& payload)
|
||||
Rank rank = ControllerExtension::getRank(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerLPDDR4", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerLPDDR4", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndBank[command][bank] = sc_time_stamp();
|
||||
|
||||
@@ -389,7 +389,7 @@ void CheckerSTTMRAM::insert(Command command, const tlm_generic_payload& payload)
|
||||
Rank rank = ControllerExtension::getRank(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerSTTMRAM", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerSTTMRAM", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndRank[command][rank] = sc_time_stamp();
|
||||
|
||||
@@ -410,7 +410,7 @@ void CheckerWideIO::insert(Command command, const tlm_generic_payload& payload)
|
||||
Rank rank = ControllerExtension::getRank(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerWideIO", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerWideIO", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndBank[command][bank] = sc_time_stamp();
|
||||
|
||||
@@ -488,7 +488,7 @@ void CheckerWideIO2::insert(Command command, const tlm_generic_payload& payload)
|
||||
Rank rank = ControllerExtension::getRank(payload);
|
||||
Bank bank = ControllerExtension::getBank(payload);
|
||||
|
||||
PRINTDEBUGMESSAGE("CheckerWideIO2", "Changing state on bank " + std::to_string(bank.ID())
|
||||
PRINTDEBUGMESSAGE("CheckerWideIO2", "Changing state on bank " + std::to_string(static_cast<std::size_t>(bank))
|
||||
+ " command is " + command.toString());
|
||||
|
||||
lastScheduledByCommandAndBank[command][bank] = sc_time_stamp();
|
||||
|
||||
@@ -55,7 +55,7 @@ protected:
|
||||
Rank rank, unsigned numberOfRanks)
|
||||
{
|
||||
// Calculate bit-reversal rank ID
|
||||
unsigned rankID = rank.ID();
|
||||
auto rankID = static_cast<unsigned>(rank);
|
||||
unsigned reverseRankID = 0;
|
||||
unsigned rankBits = 0;
|
||||
unsigned rankShift = numberOfRanks;
|
||||
|
||||
@@ -58,8 +58,8 @@ RefreshManagerPer2Bank::RefreshManagerPer2Bank(const Configuration& config,
|
||||
{
|
||||
for (unsigned bankID = outerID; bankID < (outerID + memSpec.getPer2BankOffset()); bankID++)
|
||||
{
|
||||
Bank firstBank(bankID);
|
||||
Bank secondBank(bankID + memSpec.getPer2BankOffset());
|
||||
Bank firstBank = Bank(bankID);
|
||||
Bank secondBank = Bank(bankID + memSpec.getPer2BankOffset());
|
||||
setUpDummy(refreshPayloads[bankMachinesOnRank[firstBank]], 0, rank,
|
||||
bankMachinesOnRank[firstBank]->getBankGroup(), bankMachinesOnRank[firstBank]->getBank());
|
||||
setUpDummy(refreshPayloads[bankMachinesOnRank[secondBank]], 0, rank,
|
||||
|
||||
@@ -80,7 +80,7 @@ RefreshManagerSameBank::RefreshManagerSameBank(const Configuration& config,
|
||||
CommandTuple::Type RefreshManagerSameBank::getNextCommand()
|
||||
{
|
||||
return {nextCommand,
|
||||
&refreshPayloads[currentIterator->front()->getBank().ID() % memSpec.banksPerGroup],
|
||||
&refreshPayloads[static_cast<std::size_t>(currentIterator->front()->getBank()) % memSpec.banksPerGroup],
|
||||
SC_ZERO_TIME};
|
||||
}
|
||||
|
||||
|
||||
@@ -54,7 +54,7 @@ bool BufferCounterBankwise::hasBufferSpace() const
|
||||
|
||||
void BufferCounterBankwise::storeRequest(const tlm_generic_payload& trans)
|
||||
{
|
||||
lastBankID = ControllerExtension::getBank(trans).ID();
|
||||
lastBankID = static_cast<std::size_t>(ControllerExtension::getBank(trans));
|
||||
numRequestsOnBank[lastBankID]++;
|
||||
if (trans.is_read())
|
||||
numReadRequests++;
|
||||
@@ -64,7 +64,7 @@ void BufferCounterBankwise::storeRequest(const tlm_generic_payload& trans)
|
||||
|
||||
void BufferCounterBankwise::removeRequest(const tlm_generic_payload& trans)
|
||||
{
|
||||
numRequestsOnBank[ControllerExtension::getBank(trans).ID()]--;
|
||||
numRequestsOnBank[static_cast<std::size_t>(ControllerExtension::getBank(trans))]--;
|
||||
if (trans.is_read())
|
||||
numReadRequests--;
|
||||
else
|
||||
|
||||
@@ -57,7 +57,7 @@ public:
|
||||
private:
|
||||
const unsigned requestBufferSize;
|
||||
std::vector<unsigned> numRequestsOnBank;
|
||||
unsigned lastBankID = 0;
|
||||
std::size_t lastBankID = 0;
|
||||
unsigned numReadRequests = 0;
|
||||
unsigned numWriteRequests = 0;
|
||||
};
|
||||
|
||||
@@ -45,7 +45,7 @@ namespace DRAMSys
|
||||
|
||||
SchedulerFifo::SchedulerFifo(const Configuration& config)
|
||||
{
|
||||
buffer = std::vector<std::deque<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
buffer = ControllerVector<Bank, std::deque<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
|
||||
if (config.schedulerBuffer == Configuration::SchedulerBuffer::Bankwise)
|
||||
bufferCounter = std::make_unique<BufferCounterBankwise>(config.requestBufferSize, config.memSpec->banksPerChannel);
|
||||
@@ -62,30 +62,30 @@ bool SchedulerFifo::hasBufferSpace() const
|
||||
|
||||
void SchedulerFifo::storeRequest(tlm_generic_payload& payload)
|
||||
{
|
||||
buffer[ControllerExtension::getBank(payload).ID()].push_back(&payload);
|
||||
buffer[ControllerExtension::getBank(payload)].push_back(&payload);
|
||||
bufferCounter->storeRequest(payload);
|
||||
}
|
||||
|
||||
void SchedulerFifo::removeRequest(tlm_generic_payload& payload)
|
||||
{
|
||||
buffer[ControllerExtension::getBank(payload).ID()].pop_front();
|
||||
buffer[ControllerExtension::getBank(payload)].pop_front();
|
||||
bufferCounter->removeRequest(payload);
|
||||
}
|
||||
|
||||
tlm_generic_payload* SchedulerFifo::getNextRequest(const BankMachine& bankMachine) const
|
||||
{
|
||||
unsigned bankID = bankMachine.getBank().ID();
|
||||
if (!buffer[bankID].empty())
|
||||
return buffer[bankID].front();
|
||||
Bank bank = bankMachine.getBank();
|
||||
if (!buffer[bank].empty())
|
||||
return buffer[bank].front();
|
||||
else
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool SchedulerFifo::hasFurtherRowHit(Bank bank, Row row, tlm_command command) const
|
||||
{
|
||||
if (buffer[bank.ID()].size() >= 2)
|
||||
if (buffer[bank].size() >= 2)
|
||||
{
|
||||
tlm_generic_payload& nextRequest = *buffer[bank.ID()][1];
|
||||
tlm_generic_payload& nextRequest = *buffer[bank][1];
|
||||
if (ControllerExtension::getRow(nextRequest) == row)
|
||||
return true;
|
||||
}
|
||||
@@ -94,7 +94,7 @@ bool SchedulerFifo::hasFurtherRowHit(Bank bank, Row row, tlm_command command) co
|
||||
|
||||
bool SchedulerFifo::hasFurtherRequest(Bank bank, tlm_command command) const
|
||||
{
|
||||
if (buffer[bank.ID()].size() >= 2)
|
||||
if (buffer[bank].size() >= 2)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
|
||||
@@ -61,7 +61,7 @@ public:
|
||||
[[nodiscard]] const std::vector<unsigned>& getBufferDepth() const override;
|
||||
|
||||
private:
|
||||
std::vector<std::deque<tlm::tlm_generic_payload*>> buffer;
|
||||
ControllerVector<Bank, std::deque<tlm::tlm_generic_payload*>> buffer;
|
||||
std::unique_ptr<BufferCounterIF> bufferCounter;
|
||||
};
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ namespace DRAMSys
|
||||
|
||||
SchedulerFrFcfs::SchedulerFrFcfs(const Configuration& config)
|
||||
{
|
||||
buffer = std::vector<std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
buffer = ControllerVector<Bank, std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
|
||||
if (config.schedulerBuffer == Configuration::SchedulerBuffer::Bankwise)
|
||||
bufferCounter = std::make_unique<BufferCounterBankwise>(config.requestBufferSize, config.memSpec->banksPerChannel);
|
||||
@@ -62,19 +62,19 @@ bool SchedulerFrFcfs::hasBufferSpace() const
|
||||
|
||||
void SchedulerFrFcfs::storeRequest(tlm_generic_payload& trans)
|
||||
{
|
||||
buffer[ControllerExtension::getBank(trans).ID()].push_back(&trans);
|
||||
buffer[ControllerExtension::getBank(trans)].push_back(&trans);
|
||||
bufferCounter->storeRequest(trans);
|
||||
}
|
||||
|
||||
void SchedulerFrFcfs::removeRequest(tlm_generic_payload& trans)
|
||||
{
|
||||
bufferCounter->removeRequest(trans);
|
||||
unsigned bankID = ControllerExtension::getBank(trans).ID();
|
||||
for (auto it = buffer[bankID].begin(); it != buffer[bankID].end(); it++)
|
||||
Bank bank = ControllerExtension::getBank(trans);
|
||||
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++)
|
||||
{
|
||||
if (*it == &trans)
|
||||
{
|
||||
buffer[bankID].erase(it);
|
||||
buffer[bank].erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -82,21 +82,21 @@ void SchedulerFrFcfs::removeRequest(tlm_generic_payload& trans)
|
||||
|
||||
tlm_generic_payload* SchedulerFrFcfs::getNextRequest(const BankMachine& bankMachine) const
|
||||
{
|
||||
unsigned bankID = bankMachine.getBank().ID();
|
||||
if (!buffer[bankID].empty())
|
||||
Bank bank = bankMachine.getBank();
|
||||
if (!buffer[bank].empty())
|
||||
{
|
||||
if (bankMachine.isActivated())
|
||||
{
|
||||
// Search for row hit
|
||||
Row openRow = bankMachine.getOpenRow();
|
||||
for (auto it : buffer[bankID])
|
||||
for (auto it : buffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == openRow)
|
||||
return it;
|
||||
}
|
||||
}
|
||||
// No row hit found or bank precharged
|
||||
return buffer[bankID].front();
|
||||
return buffer[bank].front();
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
@@ -104,7 +104,7 @@ tlm_generic_payload* SchedulerFrFcfs::getNextRequest(const BankMachine& bankMach
|
||||
bool SchedulerFrFcfs::hasFurtherRowHit(Bank bank, Row row, tlm_command command) const
|
||||
{
|
||||
unsigned rowHitCounter = 0;
|
||||
for (auto it : buffer[bank.ID()])
|
||||
for (auto it : buffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == row)
|
||||
{
|
||||
@@ -118,7 +118,7 @@ bool SchedulerFrFcfs::hasFurtherRowHit(Bank bank, Row row, tlm_command command)
|
||||
|
||||
bool SchedulerFrFcfs::hasFurtherRequest(Bank bank, tlm_command command) const
|
||||
{
|
||||
return (buffer[bank.ID()].size() >= 2);
|
||||
return (buffer[bank].size() >= 2);
|
||||
}
|
||||
|
||||
const std::vector<unsigned>& SchedulerFrFcfs::getBufferDepth() const
|
||||
|
||||
@@ -61,7 +61,7 @@ public:
|
||||
[[nodiscard]] const std::vector<unsigned>& getBufferDepth() const override;
|
||||
|
||||
private:
|
||||
std::vector<std::list<tlm::tlm_generic_payload*>> buffer;
|
||||
ControllerVector<Bank, std::list<tlm::tlm_generic_payload*>> buffer;
|
||||
std::unique_ptr<BufferCounterIF> bufferCounter;
|
||||
};
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ namespace DRAMSys
|
||||
|
||||
SchedulerFrFcfsGrp::SchedulerFrFcfsGrp(const Configuration& config)
|
||||
{
|
||||
buffer = std::vector<std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
buffer = ControllerVector<Bank, std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
|
||||
if (config.schedulerBuffer == Configuration::SchedulerBuffer::Bankwise)
|
||||
bufferCounter = std::make_unique<BufferCounterBankwise>(config.requestBufferSize, config.memSpec->banksPerChannel);
|
||||
@@ -62,7 +62,7 @@ bool SchedulerFrFcfsGrp::hasBufferSpace() const
|
||||
|
||||
void SchedulerFrFcfsGrp::storeRequest(tlm_generic_payload& trans)
|
||||
{
|
||||
buffer[ControllerExtension::getBank(trans).ID()].push_back(&trans);
|
||||
buffer[ControllerExtension::getBank(trans)].push_back(&trans);
|
||||
bufferCounter->storeRequest(trans);
|
||||
}
|
||||
|
||||
@@ -70,12 +70,12 @@ void SchedulerFrFcfsGrp::removeRequest(tlm_generic_payload& trans)
|
||||
{
|
||||
bufferCounter->removeRequest(trans);
|
||||
lastCommand = trans.get_command();
|
||||
unsigned bankID = ControllerExtension::getBank(trans).ID();
|
||||
for (auto it = buffer[bankID].begin(); it != buffer[bankID].end(); it++)
|
||||
Bank bank = ControllerExtension::getBank(trans);
|
||||
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++)
|
||||
{
|
||||
if (*it == &trans)
|
||||
{
|
||||
buffer[bankID].erase(it);
|
||||
buffer[bank].erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -83,15 +83,15 @@ void SchedulerFrFcfsGrp::removeRequest(tlm_generic_payload& trans)
|
||||
|
||||
tlm_generic_payload* SchedulerFrFcfsGrp::getNextRequest(const BankMachine& bankMachine) const
|
||||
{
|
||||
unsigned bankID = bankMachine.getBank().ID();
|
||||
if (!buffer[bankID].empty())
|
||||
Bank bank = bankMachine.getBank();
|
||||
if (!buffer[bank].empty())
|
||||
{
|
||||
if (bankMachine.isActivated())
|
||||
{
|
||||
// Filter all row hits
|
||||
Row openRow = bankMachine.getOpenRow();
|
||||
std::list<tlm_generic_payload *> rowHits;
|
||||
for (auto it : buffer[bankID])
|
||||
for (auto it : buffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == openRow)
|
||||
rowHits.push_back(it);
|
||||
@@ -121,7 +121,7 @@ tlm_generic_payload* SchedulerFrFcfsGrp::getNextRequest(const BankMachine& bankM
|
||||
}
|
||||
}
|
||||
// No row hit found or bank precharged
|
||||
return buffer[bankID].front();
|
||||
return buffer[bank].front();
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
@@ -129,7 +129,7 @@ tlm_generic_payload* SchedulerFrFcfsGrp::getNextRequest(const BankMachine& bankM
|
||||
bool SchedulerFrFcfsGrp::hasFurtherRowHit(Bank bank, Row row, tlm_command command) const
|
||||
{
|
||||
unsigned rowHitCounter = 0;
|
||||
for (auto it : buffer[bank.ID()])
|
||||
for (auto it : buffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == row)
|
||||
{
|
||||
@@ -143,7 +143,7 @@ bool SchedulerFrFcfsGrp::hasFurtherRowHit(Bank bank, Row row, tlm_command comman
|
||||
|
||||
bool SchedulerFrFcfsGrp::hasFurtherRequest(Bank bank, tlm_command command) const
|
||||
{
|
||||
if (buffer[bank.ID()].size() >= 2)
|
||||
if (buffer[bank].size() >= 2)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
|
||||
@@ -61,7 +61,7 @@ public:
|
||||
[[nodiscard]] const std::vector<unsigned>& getBufferDepth() const override;
|
||||
|
||||
private:
|
||||
std::vector<std::list<tlm::tlm_generic_payload *>> buffer;
|
||||
ControllerVector<Bank, std::list<tlm::tlm_generic_payload *>> buffer;
|
||||
tlm::tlm_command lastCommand = tlm::TLM_READ_COMMAND;
|
||||
std::unique_ptr<BufferCounterIF> bufferCounter;
|
||||
};
|
||||
|
||||
@@ -45,8 +45,8 @@ namespace DRAMSys
|
||||
|
||||
SchedulerGrpFrFcfs::SchedulerGrpFrFcfs(const Configuration& config)
|
||||
{
|
||||
readBuffer = std::vector<std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
writeBuffer = std::vector<std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
readBuffer = ControllerVector<Bank, std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
writeBuffer = ControllerVector<Bank, std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
|
||||
if (config.schedulerBuffer == Configuration::SchedulerBuffer::Bankwise)
|
||||
bufferCounter = std::make_unique<BufferCounterBankwise>(config.requestBufferSize, config.memSpec->banksPerChannel);
|
||||
@@ -66,9 +66,9 @@ bool SchedulerGrpFrFcfs::hasBufferSpace() const
|
||||
void SchedulerGrpFrFcfs::storeRequest(tlm_generic_payload& trans)
|
||||
{
|
||||
if (trans.is_read())
|
||||
readBuffer[ControllerExtension::getBank(trans).ID()].push_back(&trans);
|
||||
readBuffer[ControllerExtension::getBank(trans)].push_back(&trans);
|
||||
else
|
||||
writeBuffer[ControllerExtension::getBank(trans).ID()].push_back(&trans);
|
||||
writeBuffer[ControllerExtension::getBank(trans)].push_back(&trans);
|
||||
bufferCounter->storeRequest(trans);
|
||||
}
|
||||
|
||||
@@ -76,86 +76,86 @@ void SchedulerGrpFrFcfs::removeRequest(tlm_generic_payload& trans)
|
||||
{
|
||||
bufferCounter->removeRequest(trans);
|
||||
lastCommand = trans.get_command();
|
||||
unsigned bankID = ControllerExtension::getBank(trans).ID();
|
||||
Bank bank = ControllerExtension::getBank(trans);
|
||||
|
||||
if (trans.is_read())
|
||||
readBuffer[bankID].remove(&trans);
|
||||
readBuffer[bank].remove(&trans);
|
||||
else
|
||||
writeBuffer[bankID].remove(&trans);
|
||||
writeBuffer[bank].remove(&trans);
|
||||
}
|
||||
|
||||
tlm_generic_payload* SchedulerGrpFrFcfs::getNextRequest(const BankMachine& bankMachine) const
|
||||
{
|
||||
// search row hits, search wrd/wr hits
|
||||
// search rd/wr hits, search row hits
|
||||
unsigned bankID = bankMachine.getBank().ID();
|
||||
Bank bank = bankMachine.getBank();
|
||||
|
||||
if (lastCommand == tlm::TLM_READ_COMMAND)
|
||||
{
|
||||
if (!readBuffer[bankID].empty())
|
||||
if (!readBuffer[bank].empty())
|
||||
{
|
||||
if (bankMachine.isActivated())
|
||||
{
|
||||
// Search for read row hit
|
||||
Row openRow = bankMachine.getOpenRow();
|
||||
for (auto it : readBuffer[bankID])
|
||||
for (auto it : readBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == openRow)
|
||||
return it;
|
||||
}
|
||||
}
|
||||
// No read row hit found or bank precharged
|
||||
return readBuffer[bankID].front();
|
||||
return readBuffer[bank].front();
|
||||
}
|
||||
else if (!writeBuffer[bankID].empty())
|
||||
else if (!writeBuffer[bank].empty())
|
||||
{
|
||||
if (bankMachine.isActivated())
|
||||
{
|
||||
// Search for write row hit
|
||||
Row openRow = bankMachine.getOpenRow();
|
||||
for (auto it : writeBuffer[bankID])
|
||||
for (auto it : writeBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == openRow)
|
||||
return it;
|
||||
}
|
||||
}
|
||||
// No write row hit found or bank precharged
|
||||
return writeBuffer[bankID].front();
|
||||
return writeBuffer[bank].front();
|
||||
}
|
||||
else
|
||||
return nullptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!writeBuffer[bankID].empty())
|
||||
if (!writeBuffer[bank].empty())
|
||||
{
|
||||
if (bankMachine.isActivated())
|
||||
{
|
||||
// Search for write row hit
|
||||
Row openRow = bankMachine.getOpenRow();
|
||||
for (auto it : writeBuffer[bankID])
|
||||
for (auto it : writeBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == openRow)
|
||||
return it;
|
||||
}
|
||||
}
|
||||
// No write row hit found or bank precharged
|
||||
return writeBuffer[bankID].front();
|
||||
return writeBuffer[bank].front();
|
||||
}
|
||||
else if (!readBuffer[bankID].empty())
|
||||
else if (!readBuffer[bank].empty())
|
||||
{
|
||||
if (bankMachine.isActivated())
|
||||
{
|
||||
// Search for read row hit
|
||||
Row openRow = bankMachine.getOpenRow();
|
||||
for (auto it : readBuffer[bankID])
|
||||
for (auto it : readBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == openRow)
|
||||
return it;
|
||||
}
|
||||
}
|
||||
// No read row hit found or bank precharged
|
||||
return readBuffer[bankID].front();
|
||||
return readBuffer[bank].front();
|
||||
}
|
||||
else
|
||||
return nullptr;
|
||||
@@ -168,7 +168,7 @@ bool SchedulerGrpFrFcfs::hasFurtherRowHit(Bank bank, Row row, tlm_command comman
|
||||
unsigned rowHitCounter = 0;
|
||||
if (command == tlm::TLM_READ_COMMAND)
|
||||
{
|
||||
for (auto it : readBuffer[bank.ID()])
|
||||
for (auto it : readBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == row)
|
||||
{
|
||||
@@ -181,7 +181,7 @@ bool SchedulerGrpFrFcfs::hasFurtherRowHit(Bank bank, Row row, tlm_command comman
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto it : writeBuffer[bank.ID()])
|
||||
for (auto it : writeBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == row)
|
||||
{
|
||||
@@ -198,14 +198,14 @@ bool SchedulerGrpFrFcfs::hasFurtherRequest(Bank bank, tlm_command command) const
|
||||
{
|
||||
if (command == tlm::TLM_READ_COMMAND)
|
||||
{
|
||||
if (readBuffer[bank.ID()].size() >= 2)
|
||||
if (readBuffer[bank].size() >= 2)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (writeBuffer[bank.ID()].size() >= 2)
|
||||
if (writeBuffer[bank].size() >= 2)
|
||||
return true;
|
||||
else
|
||||
return false;
|
||||
|
||||
@@ -61,8 +61,8 @@ public:
|
||||
[[nodiscard]] const std::vector<unsigned>& getBufferDepth() const override;
|
||||
|
||||
private:
|
||||
std::vector<std::list<tlm::tlm_generic_payload*>> readBuffer;
|
||||
std::vector<std::list<tlm::tlm_generic_payload*>> writeBuffer;
|
||||
ControllerVector<Bank, std::list<tlm::tlm_generic_payload*>> readBuffer;
|
||||
ControllerVector<Bank, std::list<tlm::tlm_generic_payload*>> writeBuffer;
|
||||
tlm::tlm_command lastCommand = tlm::TLM_READ_COMMAND;
|
||||
std::unique_ptr<BufferCounterIF> bufferCounter;
|
||||
};
|
||||
|
||||
@@ -46,8 +46,8 @@ namespace DRAMSys
|
||||
SchedulerGrpFrFcfsWm::SchedulerGrpFrFcfsWm(const Configuration& config)
|
||||
: lowWatermark(config.lowWatermark), highWatermark(config.highWatermark)
|
||||
{
|
||||
readBuffer = std::vector<std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
writeBuffer = std::vector<std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
readBuffer = ControllerVector<Bank, std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
writeBuffer = ControllerVector<Bank, std::list<tlm_generic_payload*>>(config.memSpec->banksPerChannel);
|
||||
|
||||
if (config.schedulerBuffer == Configuration::SchedulerBuffer::Bankwise)
|
||||
bufferCounter = std::make_unique<BufferCounterBankwise>(config.requestBufferSize, config.memSpec->banksPerChannel);
|
||||
@@ -70,9 +70,9 @@ bool SchedulerGrpFrFcfsWm::hasBufferSpace() const
|
||||
void SchedulerGrpFrFcfsWm::storeRequest(tlm_generic_payload& trans)
|
||||
{
|
||||
if (trans.is_read())
|
||||
readBuffer[ControllerExtension::getBank(trans).ID()].push_back(&trans);
|
||||
readBuffer[ControllerExtension::getBank(trans)].push_back(&trans);
|
||||
else
|
||||
writeBuffer[ControllerExtension::getBank(trans).ID()].push_back(&trans);
|
||||
writeBuffer[ControllerExtension::getBank(trans)].push_back(&trans);
|
||||
bufferCounter->storeRequest(trans);
|
||||
evaluateWriteMode();
|
||||
}
|
||||
@@ -80,56 +80,56 @@ void SchedulerGrpFrFcfsWm::storeRequest(tlm_generic_payload& trans)
|
||||
void SchedulerGrpFrFcfsWm::removeRequest(tlm_generic_payload& trans)
|
||||
{
|
||||
bufferCounter->removeRequest(trans);
|
||||
unsigned bankID = ControllerExtension::getBank(trans).ID();
|
||||
Bank bank = ControllerExtension::getBank(trans);
|
||||
|
||||
if (trans.is_read())
|
||||
readBuffer[bankID].remove(&trans);
|
||||
readBuffer[bank].remove(&trans);
|
||||
else
|
||||
writeBuffer[bankID].remove(&trans);
|
||||
writeBuffer[bank].remove(&trans);
|
||||
|
||||
evaluateWriteMode();
|
||||
}
|
||||
|
||||
tlm_generic_payload* SchedulerGrpFrFcfsWm::getNextRequest(const BankMachine& bankMachine) const
|
||||
{
|
||||
unsigned bankID = bankMachine.getBank().ID();
|
||||
Bank bank = bankMachine.getBank();
|
||||
|
||||
if (!writeMode)
|
||||
{
|
||||
if (!readBuffer[bankID].empty())
|
||||
if (!readBuffer[bank].empty())
|
||||
{
|
||||
if (bankMachine.isActivated())
|
||||
{
|
||||
// Search for read row hit
|
||||
Row openRow = bankMachine.getOpenRow();
|
||||
for (auto it : readBuffer[bankID])
|
||||
for (auto it : readBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == openRow)
|
||||
return it;
|
||||
}
|
||||
}
|
||||
// No read row hit found or bank precharged
|
||||
return readBuffer[bankID].front();
|
||||
return readBuffer[bank].front();
|
||||
}
|
||||
else
|
||||
return nullptr;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (!writeBuffer[bankID].empty())
|
||||
if (!writeBuffer[bank].empty())
|
||||
{
|
||||
if (bankMachine.isActivated())
|
||||
{
|
||||
// Search for write row hit
|
||||
Row openRow = bankMachine.getOpenRow();
|
||||
for (auto it : writeBuffer[bankID])
|
||||
for (auto it : writeBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == openRow)
|
||||
return it;
|
||||
}
|
||||
}
|
||||
// No row hit found or bank precharged
|
||||
return writeBuffer[bankID].front();
|
||||
return writeBuffer[bank].front();
|
||||
}
|
||||
else
|
||||
return nullptr;
|
||||
@@ -141,7 +141,7 @@ bool SchedulerGrpFrFcfsWm::hasFurtherRowHit(Bank bank, Row row, tlm::tlm_command
|
||||
unsigned rowHitCounter = 0;
|
||||
if (!writeMode)
|
||||
{
|
||||
for (const auto* it : readBuffer[bank.ID()])
|
||||
for (const auto* it : readBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == row)
|
||||
{
|
||||
@@ -154,7 +154,7 @@ bool SchedulerGrpFrFcfsWm::hasFurtherRowHit(Bank bank, Row row, tlm::tlm_command
|
||||
}
|
||||
else
|
||||
{
|
||||
for (auto it : writeBuffer[bank.ID()])
|
||||
for (auto it : writeBuffer[bank])
|
||||
{
|
||||
if (ControllerExtension::getRow(*it) == row)
|
||||
{
|
||||
@@ -170,9 +170,9 @@ bool SchedulerGrpFrFcfsWm::hasFurtherRowHit(Bank bank, Row row, tlm::tlm_command
|
||||
bool SchedulerGrpFrFcfsWm::hasFurtherRequest(Bank bank, tlm::tlm_command command) const
|
||||
{
|
||||
if (!writeMode)
|
||||
return (readBuffer[bank.ID()].size() >= 2);
|
||||
return (readBuffer[bank].size() >= 2);
|
||||
else
|
||||
return (writeBuffer[bank.ID()].size() >= 2);
|
||||
return (writeBuffer[bank].size() >= 2);
|
||||
}
|
||||
|
||||
const std::vector<unsigned>& SchedulerGrpFrFcfsWm::getBufferDepth() const
|
||||
|
||||
@@ -64,8 +64,8 @@ public:
|
||||
private:
|
||||
void evaluateWriteMode();
|
||||
|
||||
std::vector<std::list<tlm::tlm_generic_payload*>> readBuffer;
|
||||
std::vector<std::list<tlm::tlm_generic_payload*>> writeBuffer;
|
||||
ControllerVector<Bank, std::list<tlm::tlm_generic_payload*>> readBuffer;
|
||||
ControllerVector<Bank, std::list<tlm::tlm_generic_payload*>> writeBuffer;
|
||||
std::unique_ptr<BufferCounterIF> bufferCounter;
|
||||
const unsigned lowWatermark;
|
||||
const unsigned highWatermark;
|
||||
|
||||
@@ -72,8 +72,8 @@ ArbiterSimple::ArbiterSimple(const sc_module_name& name, const Configuration& co
|
||||
|
||||
ArbiterFifo::ArbiterFifo(const sc_module_name& name, const Configuration& config,
|
||||
const AddressDecoder& addressDecoder) :
|
||||
Arbiter(name, config, addressDecoder),
|
||||
maxActiveTransactions(config.maxActiveTransactions) {}
|
||||
Arbiter(name, config, addressDecoder),
|
||||
maxActiveTransactionsPerThread(config.maxActiveTransactions) {}
|
||||
|
||||
ArbiterReorder::ArbiterReorder(const sc_module_name& name, const Configuration& config,
|
||||
const AddressDecoder& addressDecoder) :
|
||||
@@ -83,14 +83,14 @@ ArbiterReorder::ArbiterReorder(const sc_module_name& name, const Configuration&
|
||||
void Arbiter::end_of_elaboration()
|
||||
{
|
||||
// initiator side
|
||||
threadIsBusy = std::vector<bool>(tSocket.size(), false);
|
||||
nextThreadPayloadIDToAppend = std::vector<uint64_t>(tSocket.size(), 1);
|
||||
threadIsBusy = ControllerVector<Thread, bool>(tSocket.size(), false);
|
||||
nextThreadPayloadIDToAppend = ControllerVector<Thread, std::uint64_t>(tSocket.size(), 1);
|
||||
|
||||
// channel side
|
||||
channelIsBusy = std::vector<bool>(iSocket.size(), false);
|
||||
pendingRequests = std::vector<std::queue<tlm_generic_payload*>>(iSocket.size(),
|
||||
std::queue<tlm_generic_payload*>());
|
||||
nextChannelPayloadIDToAppend = std::vector<uint64_t>(iSocket.size(), 1);
|
||||
channelIsBusy = ControllerVector<Channel, bool>(iSocket.size(), false);
|
||||
pendingRequestsOnChannel = ControllerVector<Channel, std::queue<tlm_generic_payload*>>(
|
||||
iSocket.size(), std::queue<tlm_generic_payload*>());
|
||||
nextChannelPayloadIDToAppend = ControllerVector<Channel, std::uint64_t>(iSocket.size(), 1);
|
||||
}
|
||||
|
||||
void ArbiterSimple::end_of_elaboration()
|
||||
@@ -98,8 +98,8 @@ void ArbiterSimple::end_of_elaboration()
|
||||
Arbiter::end_of_elaboration();
|
||||
|
||||
// initiator side
|
||||
pendingResponses = std::vector<std::queue<tlm_generic_payload*>>(tSocket.size(),
|
||||
std::queue<tlm_generic_payload*>());
|
||||
pendingResponsesOnThread = ControllerVector<Thread, std::queue<tlm_generic_payload*>>(
|
||||
tSocket.size(), std::queue<tlm_generic_payload*>());
|
||||
}
|
||||
|
||||
void ArbiterFifo::end_of_elaboration()
|
||||
@@ -107,13 +107,13 @@ void ArbiterFifo::end_of_elaboration()
|
||||
Arbiter::end_of_elaboration();
|
||||
|
||||
// initiator side
|
||||
activeTransactions = std::vector<unsigned int>(tSocket.size(), 0);
|
||||
outstandingEndReq = std::vector<tlm_generic_payload*>(tSocket.size(), nullptr);
|
||||
pendingResponses = std::vector<std::queue<tlm_generic_payload*>>(tSocket.size(),
|
||||
std::queue<tlm_generic_payload*>());
|
||||
activeTransactionsOnThread = ControllerVector<Thread, unsigned int>(tSocket.size(), 0);
|
||||
outstandingEndReqOnThread = ControllerVector<Thread, tlm_generic_payload*>(tSocket.size(), nullptr);
|
||||
pendingResponsesOnThread = ControllerVector<Thread, std::queue<tlm_generic_payload*>>(
|
||||
tSocket.size(), std::queue<tlm_generic_payload*>());
|
||||
|
||||
lastEndReq = std::vector<sc_time>(iSocket.size(), sc_max_time());
|
||||
lastEndResp = std::vector<sc_time>(tSocket.size(), sc_max_time());
|
||||
lastEndReqOnChannel = ControllerVector<Channel, sc_time>(iSocket.size(), sc_max_time());
|
||||
lastEndRespOnThread = ControllerVector<Thread, sc_time>(tSocket.size(), sc_max_time());
|
||||
}
|
||||
|
||||
void ArbiterReorder::end_of_elaboration()
|
||||
@@ -121,14 +121,14 @@ void ArbiterReorder::end_of_elaboration()
|
||||
Arbiter::end_of_elaboration();
|
||||
|
||||
// initiator side
|
||||
activeTransactions = std::vector<unsigned int>(tSocket.size(), 0);
|
||||
outstandingEndReq = std::vector<tlm_generic_payload*>(tSocket.size(), nullptr);
|
||||
pendingResponses = std::vector<std::set<tlm_generic_payload*, ThreadPayloadIDCompare>>
|
||||
activeTransactionsOnThread = ControllerVector<Thread, unsigned int>(tSocket.size(), 0);
|
||||
outstandingEndReqOnThread = ControllerVector<Thread, tlm_generic_payload*>(tSocket.size(), nullptr);
|
||||
pendingResponsesOnThread = ControllerVector<Thread, std::set<tlm_generic_payload*, ThreadPayloadIDCompare>>
|
||||
(tSocket.size(), std::set<tlm_generic_payload*, ThreadPayloadIDCompare>());
|
||||
nextThreadPayloadIDToReturn = std::vector<uint64_t>(tSocket.size(), 1);
|
||||
nextThreadPayloadIDToReturn = ControllerVector<Thread, std::uint64_t>(tSocket.size(), 1);
|
||||
|
||||
lastEndReq = std::vector<sc_time>(iSocket.size(), sc_max_time());
|
||||
lastEndResp = std::vector<sc_time>(tSocket.size(), sc_max_time());
|
||||
lastEndReqOnChannel = ControllerVector<Channel, sc_time>(iSocket.size(), sc_max_time());
|
||||
lastEndRespOnThread = ControllerVector<Thread, sc_time>(tSocket.size(), sc_max_time());
|
||||
}
|
||||
|
||||
tlm_sync_enum Arbiter::nb_transport_fw(int id, tlm_generic_payload& trans,
|
||||
@@ -183,24 +183,24 @@ unsigned int Arbiter::transport_dbg(int /*id*/, tlm::tlm_generic_payload& trans)
|
||||
|
||||
void ArbiterSimple::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase& cbPhase)
|
||||
{
|
||||
unsigned int threadId = ArbiterExtension::getThread(cbTrans).ID();
|
||||
unsigned int channelId = ArbiterExtension::getChannel(cbTrans).ID();
|
||||
Thread thread = ArbiterExtension::getThread(cbTrans);
|
||||
Channel channel = ArbiterExtension::getChannel(cbTrans);
|
||||
|
||||
if (cbPhase == BEGIN_REQ) // from initiator
|
||||
{
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(cbTrans, nextThreadPayloadIDToAppend[threadId]++, sc_time_stamp());
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(cbTrans, nextThreadPayloadIDToAppend[thread]++, sc_time_stamp());
|
||||
|
||||
if (!channelIsBusy[channelId])
|
||||
if (!channelIsBusy[channel])
|
||||
{
|
||||
channelIsBusy[channelId] = true;
|
||||
channelIsBusy[channel] = true;
|
||||
|
||||
tlm_phase tPhase = BEGIN_REQ;
|
||||
sc_time tDelay = arbitrationDelayFw;
|
||||
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbTrans, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(cbTrans, tPhase, tDelay);
|
||||
}
|
||||
else
|
||||
pendingRequests[channelId].push(&cbTrans);
|
||||
pendingRequestsOnChannel[channel].push(&cbTrans);
|
||||
}
|
||||
else if (cbPhase == END_REQ) // from target
|
||||
{
|
||||
@@ -208,37 +208,37 @@ void ArbiterSimple::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase& c
|
||||
tlm_phase tPhase = END_REQ;
|
||||
sc_time tDelay = SC_ZERO_TIME;
|
||||
|
||||
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbTrans, tPhase, tDelay);
|
||||
tSocket[static_cast<int>(thread)]->nb_transport_bw(cbTrans, tPhase, tDelay);
|
||||
}
|
||||
|
||||
if (!pendingRequests[channelId].empty())
|
||||
if (!pendingRequestsOnChannel[channel].empty())
|
||||
{
|
||||
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
|
||||
pendingRequests[channelId].pop();
|
||||
tlm_generic_payload &tPayload = *pendingRequestsOnChannel[channel].front();
|
||||
pendingRequestsOnChannel[channel].pop();
|
||||
tlm_phase tPhase = BEGIN_REQ;
|
||||
// do not send two requests in the same cycle
|
||||
sc_time tDelay = tCK + arbitrationDelayFw;
|
||||
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
}
|
||||
else
|
||||
channelIsBusy[channelId] = false;
|
||||
channelIsBusy[channel] = false;
|
||||
}
|
||||
else if (cbPhase == BEGIN_RESP) // from memory controller
|
||||
{
|
||||
if (!threadIsBusy[threadId])
|
||||
if (!threadIsBusy[thread])
|
||||
{
|
||||
tlm_phase tPhase = BEGIN_RESP;
|
||||
sc_time tDelay = arbitrationDelayBw;
|
||||
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbTrans, tPhase, tDelay);
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(thread)]->nb_transport_bw(cbTrans, tPhase, tDelay);
|
||||
// Early completion from initiator
|
||||
if (returnValue == TLM_UPDATED)
|
||||
payloadEventQueue.notify(cbTrans, tPhase, tDelay);
|
||||
threadIsBusy[threadId] = true;
|
||||
threadIsBusy[thread] = true;
|
||||
}
|
||||
else
|
||||
pendingResponses[threadId].push(&cbTrans);
|
||||
pendingResponsesOnThread[thread].push(&cbTrans);
|
||||
}
|
||||
else if (cbPhase == END_RESP) // from initiator
|
||||
{
|
||||
@@ -246,25 +246,25 @@ void ArbiterSimple::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase& c
|
||||
tlm_phase tPhase = END_RESP;
|
||||
sc_time tDelay = SC_ZERO_TIME;
|
||||
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbTrans, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(cbTrans, tPhase, tDelay);
|
||||
}
|
||||
cbTrans.release();
|
||||
|
||||
if (!pendingResponses[threadId].empty())
|
||||
if (!pendingResponsesOnThread[thread].empty())
|
||||
{
|
||||
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
|
||||
pendingResponses[threadId].pop();
|
||||
tlm_generic_payload &tPayload = *pendingResponsesOnThread[thread].front();
|
||||
pendingResponsesOnThread[thread].pop();
|
||||
tlm_phase tPhase = BEGIN_RESP;
|
||||
// do not send two responses in the same cycle
|
||||
sc_time tDelay = tCK + arbitrationDelayBw;
|
||||
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(thread)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
// Early completion from initiator
|
||||
if (returnValue == TLM_UPDATED)
|
||||
payloadEventQueue.notify(tPayload, tPhase, tDelay);
|
||||
}
|
||||
else
|
||||
threadIsBusy[threadId] = false;
|
||||
threadIsBusy[thread] = false;
|
||||
}
|
||||
else
|
||||
SC_REPORT_FATAL(0, "Payload event queue in arbiter was triggered with unknown phase");
|
||||
@@ -272,43 +272,43 @@ void ArbiterSimple::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase& c
|
||||
|
||||
void ArbiterFifo::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase& cbPhase)
|
||||
{
|
||||
unsigned int threadId = ArbiterExtension::getThread(cbTrans).ID();
|
||||
unsigned int channelId = ArbiterExtension::getChannel(cbTrans).ID();
|
||||
Thread thread = ArbiterExtension::getThread(cbTrans);
|
||||
Channel channel = ArbiterExtension::getChannel(cbTrans);
|
||||
|
||||
if (cbPhase == BEGIN_REQ) // from initiator
|
||||
{
|
||||
if (activeTransactions[threadId] < maxActiveTransactions)
|
||||
if (activeTransactionsOnThread[thread] < maxActiveTransactionsPerThread)
|
||||
{
|
||||
activeTransactions[threadId]++;
|
||||
activeTransactionsOnThread[thread]++;
|
||||
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(cbTrans, nextThreadPayloadIDToAppend[threadId]++,
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(cbTrans, nextThreadPayloadIDToAppend[thread]++,
|
||||
sc_time_stamp());
|
||||
|
||||
tlm_phase tPhase = END_REQ;
|
||||
sc_time tDelay = SC_ZERO_TIME;
|
||||
|
||||
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbTrans, tPhase, tDelay);
|
||||
tSocket[static_cast<int>(thread)]->nb_transport_bw(cbTrans, tPhase, tDelay);
|
||||
|
||||
payloadEventQueue.notify(cbTrans, REQ_ARBITRATION, arbitrationDelayFw);
|
||||
}
|
||||
else
|
||||
outstandingEndReq[threadId] = &cbTrans;
|
||||
outstandingEndReqOnThread[thread] = &cbTrans;
|
||||
}
|
||||
else if (cbPhase == END_REQ) // from memory controller
|
||||
{
|
||||
lastEndReq[channelId] = sc_time_stamp();
|
||||
lastEndReqOnChannel[channel] = sc_time_stamp();
|
||||
|
||||
if (!pendingRequests[channelId].empty())
|
||||
if (!pendingRequestsOnChannel[channel].empty())
|
||||
{
|
||||
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
|
||||
pendingRequests[channelId].pop();
|
||||
tlm_generic_payload &tPayload = *pendingRequestsOnChannel[channel].front();
|
||||
pendingRequestsOnChannel[channel].pop();
|
||||
tlm_phase tPhase = BEGIN_REQ;
|
||||
sc_time tDelay = tCK;
|
||||
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
}
|
||||
else
|
||||
channelIsBusy[channelId] = false;
|
||||
channelIsBusy[channel] = false;
|
||||
}
|
||||
else if (cbPhase == BEGIN_RESP) // from memory controller
|
||||
{
|
||||
@@ -317,78 +317,78 @@ void ArbiterFifo::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase& cbP
|
||||
tlm_phase tPhase = END_RESP;
|
||||
sc_time tDelay = SC_ZERO_TIME;
|
||||
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbTrans, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(cbTrans, tPhase, tDelay);
|
||||
}
|
||||
|
||||
payloadEventQueue.notify(cbTrans, RESP_ARBITRATION, arbitrationDelayBw);
|
||||
}
|
||||
else if (cbPhase == END_RESP) // from initiator
|
||||
{
|
||||
lastEndResp[threadId] = sc_time_stamp();
|
||||
lastEndRespOnThread[thread] = sc_time_stamp();
|
||||
cbTrans.release();
|
||||
|
||||
if (outstandingEndReq[threadId] != nullptr)
|
||||
if (outstandingEndReqOnThread[thread] != nullptr)
|
||||
{
|
||||
tlm_generic_payload &tPayload = *outstandingEndReq[threadId];
|
||||
outstandingEndReq[threadId] = nullptr;
|
||||
tlm_generic_payload &tPayload = *outstandingEndReqOnThread[thread];
|
||||
outstandingEndReqOnThread[thread] = nullptr;
|
||||
tlm_phase tPhase = END_REQ;
|
||||
sc_time tDelay = SC_ZERO_TIME;
|
||||
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(tPayload, nextThreadPayloadIDToAppend[threadId]++,
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(tPayload, nextThreadPayloadIDToAppend[thread]++,
|
||||
sc_time_stamp());
|
||||
|
||||
tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
tSocket[static_cast<int>(thread)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
|
||||
payloadEventQueue.notify(tPayload, REQ_ARBITRATION, arbitrationDelayFw);
|
||||
}
|
||||
else
|
||||
activeTransactions[threadId]--;
|
||||
activeTransactionsOnThread[thread]--;
|
||||
|
||||
if (!pendingResponses[threadId].empty())
|
||||
if (!pendingResponsesOnThread[thread].empty())
|
||||
{
|
||||
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
|
||||
pendingResponses[threadId].pop();
|
||||
tlm_generic_payload &tPayload = *pendingResponsesOnThread[thread].front();
|
||||
pendingResponsesOnThread[thread].pop();
|
||||
tlm_phase tPhase = BEGIN_RESP;
|
||||
sc_time tDelay = tCK;
|
||||
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(thread)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
// Early completion from initiator
|
||||
if (returnValue == TLM_UPDATED)
|
||||
payloadEventQueue.notify(tPayload, tPhase, tDelay);
|
||||
}
|
||||
else
|
||||
threadIsBusy[threadId] = false;
|
||||
threadIsBusy[thread] = false;
|
||||
}
|
||||
else if (cbPhase == REQ_ARBITRATION)
|
||||
{
|
||||
pendingRequests[channelId].push(&cbTrans);
|
||||
pendingRequestsOnChannel[channel].push(&cbTrans);
|
||||
|
||||
if (!channelIsBusy[channelId])
|
||||
if (!channelIsBusy[channel])
|
||||
{
|
||||
channelIsBusy[channelId] = true;
|
||||
channelIsBusy[channel] = true;
|
||||
|
||||
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
|
||||
pendingRequests[channelId].pop();
|
||||
tlm_generic_payload &tPayload = *pendingRequestsOnChannel[channel].front();
|
||||
pendingRequestsOnChannel[channel].pop();
|
||||
tlm_phase tPhase = BEGIN_REQ;
|
||||
sc_time tDelay = lastEndReq[channelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
|
||||
sc_time tDelay = lastEndReqOnChannel[channel] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
|
||||
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
}
|
||||
}
|
||||
else if (cbPhase == RESP_ARBITRATION)
|
||||
{
|
||||
pendingResponses[threadId].push(&cbTrans);
|
||||
pendingResponsesOnThread[thread].push(&cbTrans);
|
||||
|
||||
if (!threadIsBusy[threadId])
|
||||
if (!threadIsBusy[thread])
|
||||
{
|
||||
threadIsBusy[threadId] = true;
|
||||
threadIsBusy[thread] = true;
|
||||
|
||||
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
|
||||
pendingResponses[threadId].pop();
|
||||
tlm_generic_payload &tPayload = *pendingResponsesOnThread[thread].front();
|
||||
pendingResponsesOnThread[thread].pop();
|
||||
tlm_phase tPhase = BEGIN_RESP;
|
||||
sc_time tDelay = lastEndResp[threadId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
|
||||
sc_time tDelay = lastEndRespOnThread[thread] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
|
||||
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(thread)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
// Early completion from initiator
|
||||
if (returnValue == TLM_UPDATED)
|
||||
payloadEventQueue.notify(tPayload, tPhase, tDelay);
|
||||
@@ -400,43 +400,43 @@ void ArbiterFifo::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase& cbP
|
||||
|
||||
void ArbiterReorder::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase& cbPhase)
|
||||
{
|
||||
unsigned int threadId = ArbiterExtension::getThread(cbTrans).ID();
|
||||
unsigned int channelId = ArbiterExtension::getChannel(cbTrans).ID();
|
||||
Thread thread = ArbiterExtension::getThread(cbTrans);
|
||||
Channel channel = ArbiterExtension::getChannel(cbTrans);
|
||||
|
||||
if (cbPhase == BEGIN_REQ) // from initiator
|
||||
{
|
||||
if (activeTransactions[threadId] < maxActiveTransactions)
|
||||
if (activeTransactionsOnThread[thread] < maxActiveTransactions)
|
||||
{
|
||||
activeTransactions[threadId]++;
|
||||
activeTransactionsOnThread[thread]++;
|
||||
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(cbTrans, nextThreadPayloadIDToAppend[threadId]++,
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(cbTrans, nextThreadPayloadIDToAppend[thread]++,
|
||||
sc_time_stamp());
|
||||
|
||||
tlm_phase tPhase = END_REQ;
|
||||
sc_time tDelay = SC_ZERO_TIME;
|
||||
|
||||
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbTrans, tPhase, tDelay);
|
||||
tSocket[static_cast<int>(thread)]->nb_transport_bw(cbTrans, tPhase, tDelay);
|
||||
|
||||
payloadEventQueue.notify(cbTrans, REQ_ARBITRATION, arbitrationDelayFw);
|
||||
}
|
||||
else
|
||||
outstandingEndReq[threadId] = &cbTrans;
|
||||
outstandingEndReqOnThread[thread] = &cbTrans;
|
||||
}
|
||||
else if (cbPhase == END_REQ) // from memory controller
|
||||
{
|
||||
lastEndReq[channelId] = sc_time_stamp();
|
||||
lastEndReqOnChannel[channel] = sc_time_stamp();
|
||||
|
||||
if (!pendingRequests[channelId].empty())
|
||||
if (!pendingRequestsOnChannel[channel].empty())
|
||||
{
|
||||
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
|
||||
pendingRequests[channelId].pop();
|
||||
tlm_generic_payload &tPayload = *pendingRequestsOnChannel[channel].front();
|
||||
pendingRequestsOnChannel[channel].pop();
|
||||
tlm_phase tPhase = BEGIN_REQ;
|
||||
sc_time tDelay = tCK;
|
||||
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
}
|
||||
else
|
||||
channelIsBusy[channelId] = false;
|
||||
channelIsBusy[channel] = false;
|
||||
}
|
||||
else if (cbPhase == BEGIN_RESP) // from memory controller
|
||||
{
|
||||
@@ -444,86 +444,86 @@ void ArbiterReorder::peqCallback(tlm_generic_payload& cbTrans, const tlm_phase&
|
||||
{
|
||||
tlm_phase tPhase = END_RESP;
|
||||
sc_time tDelay = SC_ZERO_TIME;
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbTrans, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(cbTrans, tPhase, tDelay);
|
||||
}
|
||||
|
||||
payloadEventQueue.notify(cbTrans, RESP_ARBITRATION, arbitrationDelayBw);
|
||||
}
|
||||
else if (cbPhase == END_RESP) // from initiator
|
||||
{
|
||||
lastEndResp[threadId] = sc_time_stamp();
|
||||
lastEndRespOnThread[thread] = sc_time_stamp();
|
||||
cbTrans.release();
|
||||
|
||||
if (outstandingEndReq[threadId] != nullptr)
|
||||
if (outstandingEndReqOnThread[thread] != nullptr)
|
||||
{
|
||||
tlm_generic_payload &tPayload = *outstandingEndReq[threadId];
|
||||
outstandingEndReq[threadId] = nullptr;
|
||||
tlm_generic_payload &tPayload = *outstandingEndReqOnThread[thread];
|
||||
outstandingEndReqOnThread[thread] = nullptr;
|
||||
tlm_phase tPhase = END_REQ;
|
||||
sc_time tDelay = SC_ZERO_TIME;
|
||||
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(tPayload, nextThreadPayloadIDToAppend[threadId]++,
|
||||
ArbiterExtension::setIDAndTimeOfGeneration(tPayload, nextThreadPayloadIDToAppend[thread]++,
|
||||
sc_time_stamp());
|
||||
|
||||
tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
tSocket[static_cast<int>(thread)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
|
||||
payloadEventQueue.notify(tPayload, REQ_ARBITRATION, arbitrationDelayFw);
|
||||
}
|
||||
else
|
||||
activeTransactions[threadId]--;
|
||||
activeTransactionsOnThread[thread]--;
|
||||
|
||||
tlm_generic_payload &tPayload = **pendingResponses[threadId].begin();
|
||||
tlm_generic_payload &tPayload = **pendingResponsesOnThread[thread].begin();
|
||||
|
||||
if (!pendingResponses[threadId].empty() &&
|
||||
ArbiterExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
|
||||
if (!pendingResponsesOnThread[thread].empty() &&
|
||||
ArbiterExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[thread])
|
||||
{
|
||||
nextThreadPayloadIDToReturn[threadId]++;
|
||||
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
|
||||
nextThreadPayloadIDToReturn[thread]++;
|
||||
pendingResponsesOnThread[thread].erase(pendingResponsesOnThread[thread].begin());
|
||||
|
||||
tlm_phase tPhase = BEGIN_RESP;
|
||||
sc_time tDelay = tCK;
|
||||
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(thread)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
// Early completion from initiator
|
||||
if (returnValue == TLM_UPDATED)
|
||||
payloadEventQueue.notify(tPayload, tPhase, tDelay);
|
||||
}
|
||||
else
|
||||
threadIsBusy[threadId] = false;
|
||||
threadIsBusy[thread] = false;
|
||||
}
|
||||
else if (cbPhase == REQ_ARBITRATION)
|
||||
{
|
||||
pendingRequests[channelId].push(&cbTrans);
|
||||
pendingRequestsOnChannel[channel].push(&cbTrans);
|
||||
|
||||
if (!channelIsBusy[channelId])
|
||||
if (!channelIsBusy[channel])
|
||||
{
|
||||
channelIsBusy[channelId] = true;
|
||||
channelIsBusy[channel] = true;
|
||||
|
||||
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
|
||||
pendingRequests[channelId].pop();
|
||||
tlm_generic_payload &tPayload = *pendingRequestsOnChannel[channel].front();
|
||||
pendingRequestsOnChannel[channel].pop();
|
||||
tlm_phase tPhase = BEGIN_REQ;
|
||||
sc_time tDelay = lastEndReq[channelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
|
||||
sc_time tDelay = lastEndReqOnChannel[channel] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
|
||||
|
||||
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
iSocket[static_cast<int>(channel)]->nb_transport_fw(tPayload, tPhase, tDelay);
|
||||
}
|
||||
}
|
||||
else if (cbPhase == RESP_ARBITRATION)
|
||||
{
|
||||
pendingResponses[threadId].insert(&cbTrans);
|
||||
pendingResponsesOnThread[thread].insert(&cbTrans);
|
||||
|
||||
if (!threadIsBusy[threadId])
|
||||
if (!threadIsBusy[thread])
|
||||
{
|
||||
tlm_generic_payload &tPayload = **pendingResponses[threadId].begin();
|
||||
tlm_generic_payload &tPayload = **pendingResponsesOnThread[thread].begin();
|
||||
|
||||
if (ArbiterExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
|
||||
if (ArbiterExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[thread])
|
||||
{
|
||||
threadIsBusy[threadId] = true;
|
||||
threadIsBusy[thread] = true;
|
||||
|
||||
nextThreadPayloadIDToReturn[threadId]++;
|
||||
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
|
||||
nextThreadPayloadIDToReturn[thread]++;
|
||||
pendingResponsesOnThread[thread].erase(pendingResponsesOnThread[thread].begin());
|
||||
tlm_phase tPhase = BEGIN_RESP;
|
||||
sc_time tDelay = lastEndResp[threadId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
|
||||
sc_time tDelay = lastEndRespOnThread[thread] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
|
||||
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
tlm_sync_enum returnValue = tSocket[static_cast<int>(thread)]->nb_transport_bw(tPayload, tPhase, tDelay);
|
||||
// Early completion from initiator
|
||||
if (returnValue == TLM_UPDATED)
|
||||
payloadEventQueue.notify(tPayload, tPhase, tDelay);
|
||||
|
||||
@@ -77,13 +77,13 @@ protected:
|
||||
tlm_utils::peq_with_cb_and_phase<Arbiter> payloadEventQueue;
|
||||
virtual void peqCallback(tlm::tlm_generic_payload& payload, const tlm::tlm_phase& phase) = 0;
|
||||
|
||||
std::vector<bool> threadIsBusy;
|
||||
std::vector<bool> channelIsBusy;
|
||||
ControllerVector<Thread, bool> threadIsBusy;
|
||||
ControllerVector<Channel, bool> channelIsBusy;
|
||||
|
||||
std::vector<std::queue<tlm::tlm_generic_payload*>> pendingRequests;
|
||||
ControllerVector<Channel, std::queue<tlm::tlm_generic_payload*>> pendingRequestsOnChannel;
|
||||
|
||||
std::vector<uint64_t> nextThreadPayloadIDToAppend;
|
||||
std::vector<uint64_t> nextChannelPayloadIDToAppend;
|
||||
ControllerVector<Thread, std::uint64_t> nextThreadPayloadIDToAppend;
|
||||
ControllerVector<Channel, std::uint64_t> nextChannelPayloadIDToAppend;
|
||||
|
||||
tlm::tlm_sync_enum nb_transport_fw(int id, tlm::tlm_generic_payload& trans,
|
||||
tlm::tlm_phase& phase, sc_core::sc_time& fwDelay);
|
||||
@@ -111,7 +111,7 @@ private:
|
||||
void end_of_elaboration() override;
|
||||
void peqCallback(tlm::tlm_generic_payload& cbTrans, const tlm::tlm_phase& phase) override;
|
||||
|
||||
std::vector<std::queue<tlm::tlm_generic_payload*>> pendingResponses;
|
||||
ControllerVector<Thread, std::queue<tlm::tlm_generic_payload*>> pendingResponsesOnThread;
|
||||
};
|
||||
|
||||
class ArbiterFifo final : public Arbiter
|
||||
@@ -125,14 +125,14 @@ private:
|
||||
void end_of_elaboration() override;
|
||||
void peqCallback(tlm::tlm_generic_payload& cbTrans, const tlm::tlm_phase& phase) override;
|
||||
|
||||
std::vector<unsigned int> activeTransactions;
|
||||
const unsigned maxActiveTransactions;
|
||||
ControllerVector<Thread, unsigned int> activeTransactionsOnThread;
|
||||
const unsigned maxActiveTransactionsPerThread;
|
||||
|
||||
std::vector<tlm::tlm_generic_payload*> outstandingEndReq;
|
||||
std::vector<std::queue<tlm::tlm_generic_payload*>> pendingResponses;
|
||||
ControllerVector<Thread, tlm::tlm_generic_payload*> outstandingEndReqOnThread;
|
||||
ControllerVector<Thread, std::queue<tlm::tlm_generic_payload*>> pendingResponsesOnThread;
|
||||
|
||||
std::vector<sc_core::sc_time> lastEndReq;
|
||||
std::vector<sc_core::sc_time> lastEndResp;
|
||||
ControllerVector<Channel, sc_core::sc_time> lastEndReqOnChannel;
|
||||
ControllerVector<Thread, sc_core::sc_time> lastEndRespOnThread;
|
||||
};
|
||||
|
||||
class ArbiterReorder final : public Arbiter
|
||||
@@ -146,7 +146,7 @@ private:
|
||||
void end_of_elaboration() override;
|
||||
void peqCallback(tlm::tlm_generic_payload& cbTrans, const tlm::tlm_phase& phase) override;
|
||||
|
||||
std::vector<unsigned int> activeTransactions;
|
||||
ControllerVector<Thread, unsigned int> activeTransactionsOnThread;
|
||||
const unsigned maxActiveTransactions;
|
||||
|
||||
struct ThreadPayloadIDCompare
|
||||
@@ -157,13 +157,13 @@ private:
|
||||
}
|
||||
};
|
||||
|
||||
std::vector<tlm::tlm_generic_payload*> outstandingEndReq;
|
||||
std::vector<std::set<tlm::tlm_generic_payload*, ThreadPayloadIDCompare>> pendingResponses;
|
||||
ControllerVector<Thread, tlm::tlm_generic_payload*> outstandingEndReqOnThread;
|
||||
ControllerVector<Thread, std::set<tlm::tlm_generic_payload*, ThreadPayloadIDCompare>> pendingResponsesOnThread;
|
||||
|
||||
std::vector<sc_core::sc_time> lastEndReq;
|
||||
std::vector<sc_core::sc_time> lastEndResp;
|
||||
ControllerVector<Channel, sc_core::sc_time> lastEndReqOnChannel;
|
||||
ControllerVector<Thread, sc_core::sc_time> lastEndRespOnThread;
|
||||
|
||||
std::vector<uint64_t> nextThreadPayloadIDToReturn;
|
||||
ControllerVector<Thread, std::uint64_t> nextThreadPayloadIDToReturn;
|
||||
};
|
||||
|
||||
} // namespace DRAMSys
|
||||
|
||||
@@ -133,7 +133,7 @@ tlm_sync_enum Dram::nb_transport_fw(tlm_generic_payload& trans, tlm_phase& phase
|
||||
#ifdef DRAMPOWER
|
||||
if (powerAnalysis)
|
||||
{
|
||||
int bank = static_cast<int>(ControllerExtension::getBank(trans).ID());
|
||||
int bank = static_cast<int>(ControllerExtension::getBank(trans));
|
||||
int64_t cycle = std::lround((sc_time_stamp() + delay) / memSpec.tCK);
|
||||
DRAMPower->doCommand(phaseToDRAMPowerCommand(phase), bank, cycle);
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user