Removed RowBufferState in ControllerState.

This commit is contained in:
Lukas Steiner
2019-07-28 20:57:48 +02:00
parent e0743b71d6
commit a05b0ed610
8 changed files with 489 additions and 522 deletions

View File

@@ -104,45 +104,26 @@ void ControllerState::change(const ScheduledCommand &scheduledCommand)
lastScheduledByCommandAndBank[scheduledCommand.getCommand()][scheduledCommand.getBank()]
= scheduledCommand;
switch (scheduledCommand.getCommand()) {
switch (scheduledCommand.getCommand())
{
case Command::RD:
lastDataStrobeCommands.emplace_back(scheduledCommand);
break;
case Command::RDA:
rowBufferStates->closeRowBuffer(scheduledCommand.getBank());
lastDataStrobeCommands.emplace_back(scheduledCommand);
break;
case Command::WR:
lastDataStrobeCommands.emplace_back(scheduledCommand);
break;
case Command::WRA:
rowBufferStates->closeRowBuffer(scheduledCommand.getBank());
lastDataStrobeCommands.emplace_back(scheduledCommand);
break;
case Command::REFA:
break;
case Command::ACTB:
rowBufferStates->openRowInRowBuffer(scheduledCommand.getBank(),
scheduledCommand.getRow());
lastActivatesB.emplace(scheduledCommand.getStart(), scheduledCommand);
break;
case Command::ACT:
rowBufferStates->openRowInRowBuffer(scheduledCommand.getBank(),
scheduledCommand.getRow());
lastActivates.emplace(scheduledCommand.getStart(), scheduledCommand);
break;
case Command::PREB:
rowBufferStates->closeRowBuffer(scheduledCommand.getBank());
break;
case Command::PRE:
rowBufferStates->closeRowBuffer(scheduledCommand.getBank());
break;
case Command::PREA:
rowBufferStates->closeAllRowBuffers();
break;
case Command::SREFEN:
rowBufferStates->closeRowBuffer(scheduledCommand.getBank());
break;
default:
break;
}

View File

@@ -38,7 +38,6 @@
#define CONTROLLERSTATE_H
#include <systemc.h>
#include "RowBufferStates.h"
#include "core/Slots.h"
#include "core/configuration/Configuration.h"
#include <map>
@@ -50,15 +49,7 @@ class ControllerState
public:
ControllerState(std::string ownerName, Configuration *config)
: bus(config->memSpec->clk), ownerName(ownerName),
config(config)
{
rowBufferStates = new RowBufferState(ownerName);
}
virtual ~ControllerState()
{
delete rowBufferStates;
}
config(config) {}
const ScheduledCommand getLastCommandOnBank(Command command, Bank bank);
const ScheduledCommand getLastCommand(Command command);
@@ -68,8 +59,6 @@ public:
void change(const ScheduledCommand &scheduledCommand);
void cleanUp(sc_time time);
RowBufferState *rowBufferStates;
//used by the various checkers
std::map<Command, std::map<Bank, ScheduledCommand> >
lastScheduledByCommandAndBank;

View File

@@ -46,7 +46,7 @@
#include "powerdown/PowerDownManager.h"
#include "refresh/IRefreshManager.h"
#include "scheduling/checker/ICommandChecker.h"
#include "../RowBufferStates.h"
//#include "../RowBufferStates.h"
#include "../ControllerState.h"
using namespace std;
@@ -63,10 +63,7 @@ public:
const std::vector<Bank> &getBanks();
std::vector<Bank> getFreeBanks();
const RowBufferState &getRowBufferStates()
{
return *(state->rowBufferStates);
}
bool hasPendingRequests();
bool hasPendingRequests(Bank bank);
bool bankIsBusy(Bank bank);

View File

@@ -35,88 +35,88 @@
* Matthias Jung
*/
#include "FrFcfs.h"
#include "../../common/dramExtensions.h"
#include "../core/configuration/Configuration.h"
#include <algorithm>
//#include "FrFcfs.h"
//#include "../../common/dramExtensions.h"
//#include "../core/configuration/Configuration.h"
//#include <algorithm>
using namespace std;
//using namespace std;
// The FrFcfs (First Ready First Come First Served) is described in a 2000 paper from Rixner et al.:
// Memory Access Scheduling
//
// The FrFcfs scheduler features for each bank in the DRAM a specific
// scheduling buffer for example:
//
// Bank0: OOOOOOOO
// Bank1: OOXXXXXX
// ... ^ ^
// ... | |
// ... back |
// ... front
// ...
// Bank6: OOOOO0XX
// Bank7: XXXXXXXX
//// The FrFcfs (First Ready First Come First Served) is described in a 2000 paper from Rixner et al.:
//// Memory Access Scheduling
////
//// The FrFcfs scheduler features for each bank in the DRAM a specific
//// scheduling buffer for example:
////
//// Bank0: OOOOOOOO
//// Bank1: OOXXXXXX
//// ... ^ ^
//// ... | |
//// ... back |
//// ... front
//// ...
//// Bank6: OOOOO0XX
//// Bank7: XXXXXXXX
void FrFcfs::storeRequest(gp *payload)
{
// FIXME: Question: what if the buffer is full? IMHO the schedule function
// should provide a true or false when the placement into the buffer worked
// out or not (?).
buffer[DramExtension::getExtension(payload).getBank()]
.emplace_back(payload);
}
//void FrFcfs::storeRequest(gp *payload)
//{
// // FIXME: Question: what if the buffer is full? IMHO the schedule function
// // should provide a true or false when the placement into the buffer worked
// // out or not (?).
// buffer[DramExtension::getExtension(payload).getBank()]
// .emplace_back(payload);
//}
std::pair<Command, gp *> FrFcfs::getNextRequest(Bank bank)
{
// If the bank is empty like Bank0 in the example we do nothing
if (buffer[bank].empty())
return std::pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
//std::pair<Command, gp *> FrFcfs::getNextRequest(Bank bank)
//{
// // If the bank is empty like Bank0 in the example we do nothing
// if (buffer[bank].empty())
// return std::pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
// In FrFcfs row hits have always the highest priority, therefore we search
// for row hits. If we find a row hit, we remove the transaction from the
// queue and send it to the DRAM.
std::deque<gp *>::iterator it = findRowHit(bank);
if (it != buffer[bank].end())
{
gp *payload = *it;
buffer[bank].erase(it);
return std::pair<Command, gp *>(getReadWriteCommand(payload), payload);
}
// // In FrFcfs row hits have always the highest priority, therefore we search
// // for row hits. If we find a row hit, we remove the transaction from the
// // queue and send it to the DRAM.
// std::deque<gp *>::iterator it = findRowHit(bank);
// if (it != buffer[bank].end())
// {
// gp *payload = *it;
// buffer[bank].erase(it);
// return std::pair<Command, gp *>(getReadWriteCommand(payload), payload);
// }
// If there is no row hit, the FrFcfs takes always the oldest transaction
// in the buffer, i.e. the transaction in the front.
return std::pair<Command, gp *>(getNextCommand(buffer[bank].front()),
buffer[bank].front());
}
// // If there is no row hit, the FrFcfs takes always the oldest transaction
// // in the buffer, i.e. the transaction in the front.
// return std::pair<Command, gp *>(getNextCommand(buffer[bank].front()),
// buffer[bank].front());
//}
// This function searches for a row hit in the scheduling queue of the specific
// bank. If no row hit is found the end of the queue is returned.
//
// Note: end() Returns an iterator referring to the past-the-end element in the
// deque container. The past-the-end element is the theoretical element that
// would follow the last element in the deque container. It does not point to
// any element, and thus shall not be dereferenced.
std::deque<gp *>::iterator FrFcfs::findRowHit(Bank bank)
{
std::deque<gp *> &queue = buffer[bank];
Row activeRow = controllerCore.getRowBufferStates().getRowInRowBuffer(bank);
//// This function searches for a row hit in the scheduling queue of the specific
//// bank. If no row hit is found the end of the queue is returned.
////
//// Note: end() Returns an iterator referring to the past-the-end element in the
//// deque container. The past-the-end element is the theoretical element that
//// would follow the last element in the deque container. It does not point to
//// any element, and thus shall not be dereferenced.
//std::deque<gp *>::iterator FrFcfs::findRowHit(Bank bank)
//{
// std::deque<gp *> &queue = buffer[bank];
// Row activeRow = controllerCore.getRowBufferStates().getRowInRowBuffer(bank);
if (!controllerCore.getRowBufferStates().rowBufferIsOpen(bank))
return queue.end();
// if (!controllerCore.getRowBufferStates().rowBufferIsOpen(bank))
// return queue.end();
// Traverse the scheduling queue of the specific bank:
for (auto it = queue.begin(); it != queue.end(); it++)
{
//Found row-hit and return the according iterator
if (DramExtension::getRow(*it) == activeRow)
return it;
}
// // Traverse the scheduling queue of the specific bank:
// for (auto it = queue.begin(); it != queue.end(); it++)
// {
// //Found row-hit and return the according iterator
// if (DramExtension::getRow(*it) == activeRow)
// return it;
// }
return queue.end();
}
// return queue.end();
//}
gp *FrFcfs::getPendingRequest(Bank /*bank*/)
{
return NULL;
}
//gp *FrFcfs::getPendingRequest(Bank /*bank*/)
//{
// return NULL;
//}

View File

@@ -33,162 +33,162 @@
* Matthias Jung
*/
#include "FrFcfsGrp.h"
//#include "FrFcfsGrp.h"
// The FrFcfsGrp (First Ready First Come First Served Grouper) works exactly
// like the FrFcfsRp (First Ready First Come First Served Read Priority).
// However writes are grouped! For detailed documentation look into the FrFcfs.
// TODO: what is missed is a check if the buffers are full. This will only work
// if we have buffers with a fixed size (Prado's future patch).
//// The FrFcfsGrp (First Ready First Come First Served Grouper) works exactly
//// like the FrFcfsRp (First Ready First Come First Served Read Priority).
//// However writes are grouped! For detailed documentation look into the FrFcfs.
//// TODO: what is missed is a check if the buffers are full. This will only work
//// if we have buffers with a fixed size (Prado's future patch).
std::pair<Command, gp *> FrFcfsGrp::getNextRequest(Bank bank)
{
// If the bank is empty we do nothing:
if (buffer[bank].empty()) {
return pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
}
//std::pair<Command, gp *> FrFcfsGrp::getNextRequest(Bank bank)
//{
// // If the bank is empty we do nothing:
// if (buffer[bank].empty()) {
// return pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
// }
// If we are in write mode we should check if we should switch to read mode
// because there are no writes anymore in the buffer.
if (readMode == false) {
if (getNumberOfRequest(tlm::TLM_WRITE_COMMAND) == 0) {
readMode = true;
}
} else { // If we are in read mode but all reads are served we switch to write
if (getNumberOfRequest(tlm::TLM_READ_COMMAND) == 0) {
readMode = false;
}
}
// // If we are in write mode we should check if we should switch to read mode
// // because there are no writes anymore in the buffer.
// if (readMode == false) {
// if (getNumberOfRequest(tlm::TLM_WRITE_COMMAND) == 0) {
// readMode = true;
// }
// } else { // If we are in read mode but all reads are served we switch to write
// if (getNumberOfRequest(tlm::TLM_READ_COMMAND) == 0) {
// readMode = false;
// }
// }
// Now lets search for read and write commands. However keep in mind that
// readMode is a shared variable for all the banks!
if (readMode == true) {
// 1. Search for read hit:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *read = *it;
// // Now lets search for read and write commands. However keep in mind that
// // readMode is a shared variable for all the banks!
// if (readMode == true) {
// // 1. Search for read hit:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *read = *it;
if (read->get_command() == tlm::TLM_READ_COMMAND) {
// If there is a row hit:
if (DramExtension::getRow(read)
== controllerCore.getRowBufferStates()
.getRowInRowBuffer(bank)) {
if (hazardDetection(bank, it) == false) {
buffer[bank].erase(it);
printDebugMessage("Read Hit found");
return pair<Command, gp *>(getReadWriteCommand(read),
read);
} else {
// If there was a hazard, switch the mode and try again:
readMode = false;
return getNextRequest(bank);
}
}
}
}
// if (read->get_command() == tlm::TLM_READ_COMMAND) {
// // If there is a row hit:
// if (DramExtension::getRow(read)
// == controllerCore.getRowBufferStates()
// .getRowInRowBuffer(bank)) {
// if (hazardDetection(bank, it) == false) {
// buffer[bank].erase(it);
// printDebugMessage("Read Hit found");
// return pair<Command, gp *>(getReadWriteCommand(read),
// read);
// } else {
// // If there was a hazard, switch the mode and try again:
// readMode = false;
// return getNextRequest(bank);
// }
// }
// }
// }
// 2. Search for read miss:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *read = *it;
// // 2. Search for read miss:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *read = *it;
if (read->get_command() == tlm::TLM_READ_COMMAND) {
if (hazardDetection(bank, it) == false) {
printDebugMessage("Read miss found");
return pair<Command, gp *>(getNextCommand(read), read);
} else {
// If there was a hazard, switch the mode and try again:
readMode = false;
return getNextRequest(bank);
}
}
}
} else { // write mode:
// 3. Search for write hit:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *write = *it;
// if (read->get_command() == tlm::TLM_READ_COMMAND) {
// if (hazardDetection(bank, it) == false) {
// printDebugMessage("Read miss found");
// return pair<Command, gp *>(getNextCommand(read), read);
// } else {
// // If there was a hazard, switch the mode and try again:
// readMode = false;
// return getNextRequest(bank);
// }
// }
// }
// } else { // write mode:
// // 3. Search for write hit:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *write = *it;
if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// If there is a row hit:
if (DramExtension::getRow(write)
== controllerCore.getRowBufferStates()
.getRowInRowBuffer(bank)) {
buffer[bank].erase(it);
printDebugMessage("Write Hit found");
return pair<Command, gp *>(getReadWriteCommand(write),
write);
}
}
}
// if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// // If there is a row hit:
// if (DramExtension::getRow(write)
// == controllerCore.getRowBufferStates()
// .getRowInRowBuffer(bank)) {
// buffer[bank].erase(it);
// printDebugMessage("Write Hit found");
// return pair<Command, gp *>(getReadWriteCommand(write),
// write);
// }
// }
// }
// 4. Search for write miss:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *write = *it;
// // 4. Search for write miss:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *write = *it;
if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
printDebugMessage("Write miss found");
return pair<Command, gp *>(getNextCommand(write), write);
}
}
}
// if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// printDebugMessage("Write miss found");
// return pair<Command, gp *>(getNextCommand(write), write);
// }
// }
// }
// If nothing was found we check the other banks before we switch the mode:
pair<Command, gp *> other(Command::NOP, NULL);
unsigned int B = Configuration::getInstance().memSpec->NumberOfBanks;
// // If nothing was found we check the other banks before we switch the mode:
// pair<Command, gp *> other(Command::NOP, NULL);
// unsigned int B = Configuration::getInstance().memSpec->NumberOfBanks;
for (unsigned int i = 1; i < B; i++) {
Bank nextBank((bank.ID() + i) % B);
ctrl->scheduleNextFromScheduler(nextBank);
}
// for (unsigned int i = 1; i < B; i++) {
// Bank nextBank((bank.ID() + i) % B);
// ctrl->scheduleNextFromScheduler(nextBank);
// }
// If nothing was found in the current mode, switch the mode and try again:
// FIXME: this is in my opinion not so clever yet, because we switch maybe
// even though there are still reads/writes request on other banks ...
readMode = !readMode;
return getNextRequest(bank);
// // If nothing was found in the current mode, switch the mode and try again:
// // FIXME: this is in my opinion not so clever yet, because we switch maybe
// // even though there are still reads/writes request on other banks ...
// readMode = !readMode;
// return getNextRequest(bank);
reportFatal("FrFcfsGrp", "Never should go here ...");
}
// reportFatal("FrFcfsGrp", "Never should go here ...");
//}
// There is a hazard if a read is found which will be scheduled before a write
// to the same column and the same row of the same bank:
bool FrFcfsGrp::hazardDetection(Bank bank, std::deque<gp *>::iterator ext)
{
gp *read = *ext;
//// There is a hazard if a read is found which will be scheduled before a write
//// to the same column and the same row of the same bank:
//bool FrFcfsGrp::hazardDetection(Bank bank, std::deque<gp *>::iterator ext)
//{
// gp *read = *ext;
//for(unsigned long i=0; i < id; i++)
for (auto it = buffer[bank].begin(); it != ext; it++) {
gp *write = *it;
if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
if ((DramExtension::getExtension(read).getColumn()
== DramExtension::getExtension(write).getColumn())
&& (DramExtension::getExtension(read).getRow()
== DramExtension::getExtension(write).getRow())) {
printDebugMessage("Hazard Detected");
return true;
}
}
}
return false;
}
// //for(unsigned long i=0; i < id; i++)
// for (auto it = buffer[bank].begin(); it != ext; it++) {
// gp *write = *it;
// if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// if ((DramExtension::getExtension(read).getColumn()
// == DramExtension::getExtension(write).getColumn())
// && (DramExtension::getExtension(read).getRow()
// == DramExtension::getExtension(write).getRow())) {
// printDebugMessage("Hazard Detected");
// return true;
// }
// }
// }
// return false;
//}
// Estimate the number of writes/reads in all bank buffers:
unsigned int FrFcfsGrp::getNumberOfRequest(tlm::tlm_command cmd)
{
unsigned int numberOfRequests = 0;
for (unsigned int i = 0;
i < Configuration::getInstance().memSpec->NumberOfBanks;
i++) {
for (auto it = buffer[i].begin(); it != buffer[i].end(); it++) {
gp *trans = *it;
if (trans->get_command() == cmd) {
numberOfRequests++;
}
}
}
//// Estimate the number of writes/reads in all bank buffers:
//unsigned int FrFcfsGrp::getNumberOfRequest(tlm::tlm_command cmd)
//{
// unsigned int numberOfRequests = 0;
// for (unsigned int i = 0;
// i < Configuration::getInstance().memSpec->NumberOfBanks;
// i++) {
// for (auto it = buffer[i].begin(); it != buffer[i].end(); it++) {
// gp *trans = *it;
// if (trans->get_command() == cmd) {
// numberOfRequests++;
// }
// }
// }
return numberOfRequests;
}
// return numberOfRequests;
//}
void FrFcfsGrp::printDebugMessage(std::string message)
{
DebugManager::getInstance().printDebugMessage("FrFcfsGrp", message);
}
//void FrFcfsGrp::printDebugMessage(std::string message)
//{
// DebugManager::getInstance().printDebugMessage("FrFcfsGrp", message);
//}

View File

@@ -33,108 +33,108 @@
* Matthias Jung
*/
#include "FrFcfsRp.h"
//#include "FrFcfsRp.h"
// The FrFcfsRp (First Ready First Come First Served Read Priority) works
// exactly like the FrFcfs but reads are prioritized over writes.
// For detailed documentation look into the FrFcfs.
//// The FrFcfsRp (First Ready First Come First Served Read Priority) works
//// exactly like the FrFcfs but reads are prioritized over writes.
//// For detailed documentation look into the FrFcfs.
std::pair<Command, gp *> FrFcfsRp::getNextRequest(Bank bank)
{
// If the bank is empty like Bank0 in the example we do nothing:
if (buffer[bank].empty()) {
return pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
}
//std::pair<Command, gp *> FrFcfsRp::getNextRequest(Bank bank)
//{
// // If the bank is empty like Bank0 in the example we do nothing:
// if (buffer[bank].empty()) {
// return pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
// }
// Order of Priority:
// 1. Read Hits (Hazard Check)
// 2. Write Hits
// 3. Read Miss (Hazard Check)
// 4. Write Miss
// // Order of Priority:
// // 1. Read Hits (Hazard Check)
// // 2. Write Hits
// // 3. Read Miss (Hazard Check)
// // 4. Write Miss
// 1. Search for read hit:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *read = *it;
// // 1. Search for read hit:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *read = *it;
if (read->get_command() == tlm::TLM_READ_COMMAND) {
// If there is a row hit:
if (DramExtension::getRow(read)
== controllerCore.getRowBufferStates().getRowInRowBuffer(bank)) {
if (hazardDetection(bank, it) == false) {
buffer[bank].erase(it);
printDebugMessage("Read Hit found");
return pair<Command, gp *>(getReadWriteCommand(read), read);
}
}
}
}
// if (read->get_command() == tlm::TLM_READ_COMMAND) {
// // If there is a row hit:
// if (DramExtension::getRow(read)
// == controllerCore.getRowBufferStates().getRowInRowBuffer(bank)) {
// if (hazardDetection(bank, it) == false) {
// buffer[bank].erase(it);
// printDebugMessage("Read Hit found");
// return pair<Command, gp *>(getReadWriteCommand(read), read);
// }
// }
// }
// }
// 2. Search for write hit:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *write = *it;
// // 2. Search for write hit:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *write = *it;
if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// If there is a row hit:
if (DramExtension::getRow(write)
== controllerCore.getRowBufferStates().getRowInRowBuffer(bank)) {
buffer[bank].erase(it);
printDebugMessage("Write Hit found");
return pair<Command, gp *>(getReadWriteCommand(write), write);
}
}
}
// if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// // If there is a row hit:
// if (DramExtension::getRow(write)
// == controllerCore.getRowBufferStates().getRowInRowBuffer(bank)) {
// buffer[bank].erase(it);
// printDebugMessage("Write Hit found");
// return pair<Command, gp *>(getReadWriteCommand(write), write);
// }
// }
// }
// For now return the oldest request but also prefer reads before writes:
// // For now return the oldest request but also prefer reads before writes:
// 3. Search for read miss:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *read = *it;
// // 3. Search for read miss:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *read = *it;
if (read->get_command() == tlm::TLM_READ_COMMAND) {
if (hazardDetection(bank, it) == false) {
printDebugMessage("Read miss found");
return pair<Command, gp *>(getNextCommand(read), read);
}
}
}
// if (read->get_command() == tlm::TLM_READ_COMMAND) {
// if (hazardDetection(bank, it) == false) {
// printDebugMessage("Read miss found");
// return pair<Command, gp *>(getNextCommand(read), read);
// }
// }
// }
// 4. Search for write miss:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *write = *it;
// // 4. Search for write miss:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *write = *it;
if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
printDebugMessage("Write miss found");
return pair<Command, gp *>(getNextCommand(write), write);
}
}
// if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// printDebugMessage("Write miss found");
// return pair<Command, gp *>(getNextCommand(write), write);
// }
// }
reportFatal("FrFcfsRp", "Never should go here ...");
return pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
}
// reportFatal("FrFcfsRp", "Never should go here ...");
// return pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
//}
// There is a hazard if a read is found which will be scheduled before a write
// to the same column and the same row of the same bank:
bool FrFcfsRp::hazardDetection(Bank bank, std::deque<gp *>::iterator ext)
{
gp *read = *ext;
//// There is a hazard if a read is found which will be scheduled before a write
//// to the same column and the same row of the same bank:
//bool FrFcfsRp::hazardDetection(Bank bank, std::deque<gp *>::iterator ext)
//{
// gp *read = *ext;
//for(unsigned long i=0; i < id; i++)
for (auto it = buffer[bank].begin(); it != ext; it++) {
gp *write = *it;
if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
if ((DramExtension::getExtension(read).getColumn()
== DramExtension::getExtension(write).getColumn())
&& (DramExtension::getExtension(read).getRow()
== DramExtension::getExtension(write).getRow())) {
printDebugMessage("Hazard Detected");
return true;
}
}
}
return false;
}
// //for(unsigned long i=0; i < id; i++)
// for (auto it = buffer[bank].begin(); it != ext; it++) {
// gp *write = *it;
// if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// if ((DramExtension::getExtension(read).getColumn()
// == DramExtension::getExtension(write).getColumn())
// && (DramExtension::getExtension(read).getRow()
// == DramExtension::getExtension(write).getRow())) {
// printDebugMessage("Hazard Detected");
// return true;
// }
// }
// }
// return false;
//}
void FrFcfsRp::printDebugMessage(std::string message)
{
DebugManager::getInstance().printDebugMessage("FrFcfsRp", message);
}
//void FrFcfsRp::printDebugMessage(std::string message)
//{
// DebugManager::getInstance().printDebugMessage("FrFcfsRp", message);
//}

View File

@@ -33,148 +33,148 @@
* Matthias Jung
*/
#include "Grp.h"
//#include "Grp.h"
// Grp (Grouper) just reorders w.r.t. read write grouping, however is not aware of the
// row buffer. For a row buffer aware grouper refer to FrFcfsGrp.
// TODO: what is missed is a check if the buffers are full. This will only work
// if we have buffers with a fixed size (Prado's future patch).
//// Grp (Grouper) just reorders w.r.t. read write grouping, however is not aware of the
//// row buffer. For a row buffer aware grouper refer to FrFcfsGrp.
//// TODO: what is missed is a check if the buffers are full. This will only work
//// if we have buffers with a fixed size (Prado's future patch).
std::pair<Command, gp *> Grp::getNextRequest(Bank bank)
{
// If the bank is empty we do nothing:
if (buffer[bank].empty()) {
return pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
}
//std::pair<Command, gp *> Grp::getNextRequest(Bank bank)
//{
// // If the bank is empty we do nothing:
// if (buffer[bank].empty()) {
// return pair<Command, tlm::tlm_generic_payload *>(Command::NOP, NULL);
// }
// If we are in write mode we should check if we should switch to read mode
// because there are no writes anymore in the buffer.
if (readMode == false) {
if (getNumberOfRequest(tlm::TLM_WRITE_COMMAND) == 0) {
readMode = true;
}
} else { // If we are in read mode but all reads are served we switch to write
if (getNumberOfRequest(tlm::TLM_READ_COMMAND) == 0) {
readMode = false;
}
}
// // If we are in write mode we should check if we should switch to read mode
// // because there are no writes anymore in the buffer.
// if (readMode == false) {
// if (getNumberOfRequest(tlm::TLM_WRITE_COMMAND) == 0) {
// readMode = true;
// }
// } else { // If we are in read mode but all reads are served we switch to write
// if (getNumberOfRequest(tlm::TLM_READ_COMMAND) == 0) {
// readMode = false;
// }
// }
// Now lets search for read and write commands. However keep in mind that
// readMode is a shared variable for all the banks!
if (readMode == true) {
// 1. Search for read hit:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *read = *it;
// // Now lets search for read and write commands. However keep in mind that
// // readMode is a shared variable for all the banks!
// if (readMode == true) {
// // 1. Search for read hit:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *read = *it;
if (read->get_command() == tlm::TLM_READ_COMMAND) {
// If there is a row hit:
if (DramExtension::getRow(read)
== controllerCore.getRowBufferStates()
.getRowInRowBuffer(bank)) {
if (hazardDetection(bank, it) == false) {
buffer[bank].erase(it);
printDebugMessage("Read Hit found");
return pair<Command, gp *>(getReadWriteCommand(read),
read);
} else {
// If there was a hazard, switch the mode and try again:
readMode = false;
return getNextRequest(bank);
}
} else { // if there is a row miss:
if (hazardDetection(bank, it) == false) {
printDebugMessage("Read miss found");
return pair<Command, gp *>(getNextCommand(read), read);
} else {
// If there was a hazard, switch the mode and try again:
readMode = false;
return getNextRequest(bank);
}
}
}
}
// if (read->get_command() == tlm::TLM_READ_COMMAND) {
// // If there is a row hit:
// if (DramExtension::getRow(read)
// == controllerCore.getRowBufferStates()
// .getRowInRowBuffer(bank)) {
// if (hazardDetection(bank, it) == false) {
// buffer[bank].erase(it);
// printDebugMessage("Read Hit found");
// return pair<Command, gp *>(getReadWriteCommand(read),
// read);
// } else {
// // If there was a hazard, switch the mode and try again:
// readMode = false;
// return getNextRequest(bank);
// }
// } else { // if there is a row miss:
// if (hazardDetection(bank, it) == false) {
// printDebugMessage("Read miss found");
// return pair<Command, gp *>(getNextCommand(read), read);
// } else {
// // If there was a hazard, switch the mode and try again:
// readMode = false;
// return getNextRequest(bank);
// }
// }
// }
// }
} else { // write mode:
// 3. Search for write hit:
for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
gp *write = *it;
// } else { // write mode:
// // 3. Search for write hit:
// for (auto it = buffer[bank].begin(); it != buffer[bank].end(); it++) {
// gp *write = *it;
if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// If there is a row hit:
if (DramExtension::getRow(write)
== controllerCore.getRowBufferStates()
.getRowInRowBuffer(bank)) {
buffer[bank].erase(it);
printDebugMessage("Write Hit found");
return pair<Command, gp *>(getReadWriteCommand(write),
write);
} else {
printDebugMessage("Write miss found");
return pair<Command, gp *>(getNextCommand(write), write);
}
}
}
}
// if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// // If there is a row hit:
// if (DramExtension::getRow(write)
// == controllerCore.getRowBufferStates()
// .getRowInRowBuffer(bank)) {
// buffer[bank].erase(it);
// printDebugMessage("Write Hit found");
// return pair<Command, gp *>(getReadWriteCommand(write),
// write);
// } else {
// printDebugMessage("Write miss found");
// return pair<Command, gp *>(getNextCommand(write), write);
// }
// }
// }
// }
// If nothing was found we check the other banks before we switch the mode:
pair<Command, gp *> other(Command::NOP, NULL);
unsigned int B = Configuration::getInstance().memSpec->NumberOfBanks;
// // If nothing was found we check the other banks before we switch the mode:
// pair<Command, gp *> other(Command::NOP, NULL);
// unsigned int B = Configuration::getInstance().memSpec->NumberOfBanks;
for (unsigned int i = 1; i < B; i++) {
Bank nextBank((bank.ID() + i) % B);
ctrl->scheduleNextFromScheduler(nextBank);
}
// for (unsigned int i = 1; i < B; i++) {
// Bank nextBank((bank.ID() + i) % B);
// ctrl->scheduleNextFromScheduler(nextBank);
// }
// If nothing was found in the current mode, switch the mode and try again:
// FIXME: this is in my opinion not so clever yet, because we switch maybe
// even though there are still reads/writes request on other banks ...
readMode = !readMode;
return getNextRequest(bank);
// // If nothing was found in the current mode, switch the mode and try again:
// // FIXME: this is in my opinion not so clever yet, because we switch maybe
// // even though there are still reads/writes request on other banks ...
// readMode = !readMode;
// return getNextRequest(bank);
reportFatal("Grp", "Never should go here ...");
}
// reportFatal("Grp", "Never should go here ...");
//}
// A hazard exists when a read candidate would be scheduled ahead of an
// earlier write that targets the same row AND the same column of the same
// bank (the read would observe stale data).
bool Grp::hazardDetection(Bank bank, std::deque<gp *>::iterator ext)
{
    gp *read = *ext;

    // Scan every request queued in this bank ahead of the read candidate.
    for (auto pos = buffer[bank].begin(); pos != ext; ++pos) {
        gp *candidate = *pos;
        if (candidate->get_command() != tlm::TLM_WRITE_COMMAND)
            continue;

        // Same column and same row as the pending write -> hazard.
        if ((DramExtension::getExtension(read).getColumn()
             == DramExtension::getExtension(candidate).getColumn())
            && (DramExtension::getExtension(read).getRow()
                == DramExtension::getExtension(candidate).getRow())) {
            printDebugMessage("Hazard Detected");
            return true;
        }
    }
    return false;
}
// //for(unsigned long i=0; i < id; i++)
// for (auto it = buffer[bank].begin(); it != ext; it++) {
// gp *write = *it;
// if (write->get_command() == tlm::TLM_WRITE_COMMAND) {
// if ((DramExtension::getExtension(read).getColumn()
// == DramExtension::getExtension(write).getColumn())
// && (DramExtension::getExtension(read).getRow()
// == DramExtension::getExtension(write).getRow())) {
// printDebugMessage("Hazard Detected");
// return true;
// }
// }
// }
// return false;
//}
// Count how many requests across all bank buffers carry the given TLM
// command (i.e. the total number of pending reads or writes).
unsigned int Grp::getNumberOfRequest(tlm::tlm_command cmd)
{
    unsigned int total = 0;
    const unsigned int numBanks =
        Configuration::getInstance().memSpec->NumberOfBanks;

    for (unsigned int bankId = 0; bankId < numBanks; bankId++) {
        for (gp *request : buffer[bankId]) {
            if (request->get_command() == cmd)
                total++;
        }
    }
    return total;
}
// return numberOfRequests;
//}
// Forward a debug line to the central DebugManager, tagged with this
// scheduler's sender name.
void Grp::printDebugMessage(std::string message)
{
    auto &debug = DebugManager::getInstance();
    debug.printDebugMessage("FrFcfsGrp", message);
}
//void Grp::printDebugMessage(std::string message)
//{
// DebugManager::getInstance().printDebugMessage("FrFcfsGrp", message);
//}

View File

@@ -35,62 +35,62 @@
* Matthias Jung
*/
#include "IScheduler.h"
#include "../../common/DebugManager.h"
#include "../core/configuration/Configuration.h"
//#include "IScheduler.h"
//#include "../../common/DebugManager.h"
//#include "../core/configuration/Configuration.h"
// Sender tag used by printDebugMessage() when reporting through DebugManager.
std::string IScheduler::sendername = "scheduler";
//std::string IScheduler::sendername = "scheduler";
// Emit a debug line through the central DebugManager, tagged with the
// scheduler's shared sender name.
void IScheduler::printDebugMessage(std::string message)
{
    auto &debug = DebugManager::getInstance();
    debug.printDebugMessage(IScheduler::sendername, message);
}
//void IScheduler::printDebugMessage(std::string message)
//{
// DebugManager::getInstance().printDebugMessage(IScheduler::sendername, message);
//}
// Get the next command that is necessary to process the request represented
// by the payload: activate a closed row, precharge on a row miss, or issue
// the actual read/write on a row hit.
Command IScheduler::getNextCommand(gp &payload)
{
    Bank bank = DramExtension::getBank(payload);
    if (!controllerCore.getRowBufferStates().rowBufferIsOpen(bank))
    {
        // Row buffer closed: the target row must be activated first.
        return Command::ACT;
    }
    // NOTE: the row buffer is necessarily open on this path, so re-testing
    // rowBufferIsOpen() here (as the previous version did) was redundant.
    else if (controllerCore.getRowBufferStates().getRowInRowBuffer(bank)
             != DramExtension::getRow(payload))
    {
        // Row miss: the open row must be precharged before re-activation.
        return Command::PRE;
    }
    else
    {
        // Row hit: issue the read or write (with or without auto-precharge).
        return getReadWriteCommand(payload);
    }
}
//// Get the next command that is necessary to process the request represented by the payload
//Command IScheduler::getNextCommand(gp &payload)
//{
// Bank bank = DramExtension::getBank(payload);
// if (!controllerCore.getRowBufferStates().rowBufferIsOpen(bank))
// {
// return Command::ACT;
// }
// else if (controllerCore.getRowBufferStates().rowBufferIsOpen(bank) &&
// controllerCore.getRowBufferStates().getRowInRowBuffer(bank) !=
// DramExtension::getRow(payload))
// {
// return Command::PRE;
// }
// else
// {
// return getReadWriteCommand(payload);
// }
//}
// Pointer convenience overload: forwards to the reference version.
Command IScheduler::getNextCommand(gp *payload)
{
    gp &request = *payload;
    return getNextCommand(request);
}
//Command IScheduler::getNextCommand(gp *payload)
//{
// return getNextCommand(*payload);
//}
// Select the concrete read/write command for the payload. Under an open-page
// policy the plain commands are used; otherwise the auto-precharge variants.
Command IScheduler::getReadWriteCommand(gp &payload)
{
    const bool openPage = Configuration::getInstance().OpenPagePolicy;

    if (payload.get_command() == tlm::TLM_READ_COMMAND)
        return openPage ? Command::RD : Command::RDA;

    return openPage ? Command::WR : Command::WRA;
}
//Command IScheduler::getReadWriteCommand(gp &payload)
//{
// if (payload.get_command() == tlm::TLM_READ_COMMAND)
// {
// if (Configuration::getInstance().OpenPagePolicy)
// return Command::RD;
// else
// return Command::RDA;
// }
// else
// {
// if (Configuration::getInstance().OpenPagePolicy)
// return Command::WR;
// else
// return Command::WRA;
// }
//}
// Pointer convenience overload: forwards to the reference version.
Command IScheduler::getReadWriteCommand(gp *payload)
{
    gp &request = *payload;
    return getReadWriteCommand(request);
}
//Command IScheduler::getReadWriteCommand(gp *payload)
//{
// return getReadWriteCommand(*payload);
//}