Start adaptation of FIFO and reorder arbiter, not finished!

This commit is contained in:
Lukas Steiner
2021-01-14 14:27:53 +01:00
parent 87906da06b
commit a6684d95a4

View File

@@ -257,27 +257,27 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
nextThreadPayloadIDToAppend[threadId]++, nextChannelPayloadIDToAppend[channelId]++);
activeTransactions[threadId]++;
if (activeTransactions[threadId] < maxActiveTransactions)
if (activeTransactions[threadId] <= maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
pendingRequests[channelId].push(&cbPayload);
}
else
{
outstandingEndReq[threadId] = &cbPayload;
}
pendingRequests[channelId].push(&cbPayload); // TODO: wrong, only insert when END_REQ has been sent
if (!channelIsBusy[channelId])
if (!channelIsBusy[channelId]) // TODO: only required if pendingRequests was empty?
{
channelIsBusy[channelId] = true;
tlm_generic_payload *tPayload = pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK; // TODO: SC_ZERO_TIME
// TODO: what if END_REQ was in the same cycle?
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(*tPayload, tPhase, tDelay);
}
@@ -315,26 +315,28 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
tlm_generic_payload *tPayload = pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
// TODO: what if one BEGIN_RESP has already been sent in this cycle?
sc_time tDelay = SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
}
else if (cbPhase == END_RESP) // from initiator
{
cbPayload.release();
activeTransactions[threadId]--;
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tlm_generic_payload *tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
unsigned int tChannelId = DramExtension::getExtension(tPayload).getChannel().ID();
pendingRequests[tChannelId].push(tPayload);
}
if (!pendingResponses[threadId].empty())
{
@@ -345,16 +347,6 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
else
{
@@ -377,27 +369,27 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
nextThreadPayloadIDToAppend[threadId]++, nextChannelPayloadIDToAppend[channelId]++);
activeTransactions[threadId]++;
if (activeTransactions[threadId] < maxActiveTransactions)
if (activeTransactions[threadId] <= maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
pendingRequests[channelId].push(&cbPayload);
}
else
{
outstandingEndReq[threadId] = &cbPayload;
}
pendingRequests[channelId].push(&cbPayload);
if (!channelIsBusy[channelId])
if (!channelIsBusy[channelId]) // TODO: only required if pendingRequests was empty?
{
channelIsBusy[channelId] = true;
tlm_generic_payload *tPayload = pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = tCK;
// TODO: what if END_REQ was in the same cycle?
sc_time tDelay = SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(*tPayload, tPhase, tDelay);
}
@@ -439,26 +431,29 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
threadIsBusy[threadId] = true;
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
// TODO: what if one BEGIN_RESP has already been sent in this cycle?
sc_time tDelay = SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
}
}
else if (cbPhase == END_RESP) // from initiator
{
cbPayload.release();
activeTransactions[threadId]--;
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tlm_generic_payload *tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
unsigned int tChannelId = DramExtension::getExtension(tPayload).getChannel().ID();
pendingRequests[tChannelId].push(tPayload);
}
tlm_generic_payload *tPayload = *pendingResponses[threadId].begin();
@@ -467,22 +462,11 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
{
nextThreadPayloadIDToReturn[threadId]++;
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(*tPayload, tPhase, tDelay);
if (activeTransactions[threadId] == maxActiveTransactions)
{
tlm_phase tPhase = END_REQ;
sc_time tDelay = SC_ZERO_TIME;
tPayload = outstandingEndReq[threadId];
tSocket[static_cast<int>(threadId)]->nb_transport_bw(*tPayload, tPhase, tDelay);
}
activeTransactions[threadId]--;
}
else
{