Bug fix in the FIFO and reorder arbiters.

This commit is contained in:
Lukas Steiner
2021-01-27 13:12:36 +01:00
parent a6ce8f63cb
commit d86dc97a28

View File

@@ -184,6 +184,7 @@ void ArbiterSimple::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = arbitrationDelayFw;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
else
@@ -194,6 +195,7 @@ void ArbiterSimple::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
}
@@ -204,6 +206,7 @@ void ArbiterSimple::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
tlm_phase tPhase = BEGIN_REQ;
// do not send two requests in the same cycle
sc_time tDelay = tCK + arbitrationDelayFw;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
else
@@ -215,6 +218,7 @@ void ArbiterSimple::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
{
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = arbitrationDelayBw;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(cbPayload, tPhase, tDelay);
@@ -228,6 +232,7 @@ void ArbiterSimple::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
{
tlm_phase tPhase = END_RESP;
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
cbPayload.release();
@@ -239,6 +244,7 @@ void ArbiterSimple::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
tlm_phase tPhase = BEGIN_RESP;
// do not send two responses in the same cycle
sc_time tDelay = tCK + arbitrationDelayBw;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
@@ -266,24 +272,24 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
pendingRequests[channelId].push(&cbPayload);
}
else
{
outstandingEndReq[threadId] = &cbPayload;
}
if (!channelIsBusy[channelId]) // TODO: only required if pendingRequests was empty?
if (!channelIsBusy[channelId] && !pendingRequests[channelId].empty())
{
channelIsBusy[channelId] = true;
tlm_generic_payload *tPayload = pendingRequests[channelId].front();
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = lastEndReq[channelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(*tPayload, tPhase, tDelay);
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
}
else if (cbPhase == END_REQ) // from memory controller
@@ -296,12 +302,11 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
else
{
channelIsBusy[channelId] = false;
}
}
else if (cbPhase == BEGIN_RESP) // from memory controller
{
@@ -309,6 +314,7 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
{
tlm_phase tPhase = END_RESP;
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
@@ -318,14 +324,14 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
{
threadIsBusy[threadId] = true;
tlm_generic_payload *tPayload = pendingResponses[threadId].front();
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = lastEndResp[threadId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
}
else if (cbPhase == END_RESP) // from initiator
@@ -338,26 +344,27 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tlm_generic_payload *tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
tlm_generic_payload &tPayload = *outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
unsigned int tChannelId = DramExtension::getExtension(tPayload).getChannel().ID();
pendingRequests[tChannelId].push(tPayload);
pendingRequests[tChannelId].push(&tPayload);
}
if (!pendingResponses[threadId].empty())
{
tlm_generic_payload *tPayload = pendingResponses[threadId].front();
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
else
{
threadIsBusy[threadId] = false;
}
}
else
SC_REPORT_FATAL(0, "Payload event queue in arbiter was triggered with unknown phase");
@@ -379,24 +386,24 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
pendingRequests[channelId].push(&cbPayload);
}
else
{
outstandingEndReq[threadId] = &cbPayload;
}
if (!channelIsBusy[channelId]) // TODO: only required if pendingRequests was empty?
if (!channelIsBusy[channelId] && !pendingRequests[channelId].empty())
{
channelIsBusy[channelId] = true;
tlm_generic_payload *tPayload = pendingRequests[channelId].front();
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = lastEndReq[channelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(*tPayload, tPhase, tDelay);
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
}
else if (cbPhase == END_REQ) // from memory controller
@@ -409,12 +416,11 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
else
{
channelIsBusy[channelId] = false;
}
}
else if (cbPhase == BEGIN_RESP) // from memory controller
{
@@ -429,7 +435,7 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
if (!threadIsBusy[threadId])
{
tlm_generic_payload *tPayload = *pendingResponses[threadId].begin();
tlm_generic_payload &tPayload = **pendingResponses[threadId].begin();
if (DramExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
{
@@ -440,9 +446,9 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = lastEndResp[threadId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
}
}
@@ -456,24 +462,28 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tlm_generic_payload *tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
tlm_generic_payload &tPayload = *outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
unsigned int tChannelId = DramExtension::getExtension(tPayload).getChannel().ID();
pendingRequests[tChannelId].push(tPayload);
pendingRequests[tChannelId].push(&tPayload);
}
tlm_generic_payload *tPayload = *pendingResponses[threadId].begin();
tlm_generic_payload &tPayload = **pendingResponses[threadId].begin();
if (!pendingResponses[threadId].empty() &&
DramExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
{
nextThreadPayloadIDToReturn[threadId]++;
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
else
{