Merge branch 'develop' into 'work/traceanalyzer_dynamicarrangement'

# Conflicts:
#   DRAMSys/traceAnalyzer/businessObjects/phases/phase.cpp
This commit is contained in:
Lukas Steiner
2021-08-13 07:23:35 +00:00
5 changed files with 157 additions and 89 deletions

View File

@@ -224,6 +224,7 @@ void ArbiterSimple::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
sc_time tDelay = arbitrationDelayBw;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
// Early completion from initiator
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(cbPayload, tPhase, tDelay);
threadIsBusy[threadId] = true;
@@ -250,6 +251,7 @@ void ArbiterSimple::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
sc_time tDelay = tCK + arbitrationDelayBw;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
// Early completion from initiator
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
@@ -280,22 +282,10 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
pendingRequests[channelId].push(&cbPayload);
payloadEventQueue.notify(cbPayload, REQ_ARBITRATION, arbitrationDelayFw);
}
else
outstandingEndReq[threadId] = &cbPayload;
if (!channelIsBusy[channelId] && !pendingRequests[channelId].empty())
{
channelIsBusy[channelId] = true;
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = lastEndReq[channelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
}
else if (cbPhase == END_REQ) // from memory controller
{
@@ -323,21 +313,7 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
pendingResponses[threadId].push(&cbPayload);
if (!threadIsBusy[threadId])
{
threadIsBusy[threadId] = true;
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = lastEndResp[threadId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
payloadEventQueue.notify(cbPayload, RESP_ARBITRATION, arbitrationDelayBw);
}
else if (cbPhase == END_RESP) // from initiator
{
@@ -358,17 +334,7 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (!channelIsBusy[tChannelId])
{
channelIsBusy[tChannelId] = true;
tPhase = BEGIN_REQ;
tDelay = lastEndReq[tChannelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
iSocket[static_cast<int>(tChannelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
else
pendingRequests[tChannelId].push(&tPayload);
payloadEventQueue.notify(tPayload, REQ_ARBITRATION, arbitrationDelayFw);
}
else
activeTransactions[threadId]--;
@@ -381,12 +347,48 @@ void ArbiterFifo::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase &c
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
// Early completion from initiator
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
else
threadIsBusy[threadId] = false;
}
else if (cbPhase == REQ_ARBITRATION)
{
pendingRequests[channelId].push(&cbPayload);
if (!channelIsBusy[channelId])
{
channelIsBusy[channelId] = true;
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = lastEndReq[channelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
}
else if (cbPhase == RESP_ARBITRATION)
{
pendingResponses[threadId].push(&cbPayload);
if (!threadIsBusy[threadId])
{
threadIsBusy[threadId] = true;
tlm_generic_payload &tPayload = *pendingResponses[threadId].front();
pendingResponses[threadId].pop();
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = lastEndResp[threadId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
// Early completion from initiator
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
}
else
SC_REPORT_FATAL(0, "Payload event queue in arbiter was triggered with unknown phase");
}
@@ -411,22 +413,10 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
tSocket[static_cast<int>(threadId)]->nb_transport_bw(cbPayload, tPhase, tDelay);
pendingRequests[channelId].push(&cbPayload);
payloadEventQueue.notify(cbPayload, REQ_ARBITRATION, arbitrationDelayFw);
}
else
outstandingEndReq[threadId] = &cbPayload;
if (!channelIsBusy[channelId] && !pendingRequests[channelId].empty())
{
channelIsBusy[channelId] = true;
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = lastEndReq[channelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
}
else if (cbPhase == END_REQ) // from memory controller
{
@@ -453,26 +443,7 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
iSocket[static_cast<int>(channelId)]->nb_transport_fw(cbPayload, tPhase, tDelay);
}
pendingResponses[threadId].insert(&cbPayload);
if (!threadIsBusy[threadId])
{
tlm_generic_payload &tPayload = **pendingResponses[threadId].begin();
if (DramExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
{
nextThreadPayloadIDToReturn[threadId]++;
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
threadIsBusy[threadId] = true;
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = lastEndResp[threadId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
}
payloadEventQueue.notify(cbPayload, RESP_ARBITRATION, arbitrationDelayBw);
}
else if (cbPhase == END_RESP) // from initiator
{
@@ -493,17 +464,7 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
if (!channelIsBusy[tChannelId])
{
channelIsBusy[tChannelId] = true;
tPhase = BEGIN_REQ;
tDelay = lastEndReq[tChannelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
iSocket[static_cast<int>(tChannelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
else
pendingRequests[tChannelId].push(&tPayload);
payloadEventQueue.notify(tPayload, REQ_ARBITRATION, arbitrationDelayFw);
}
else
activeTransactions[threadId]--;
@@ -520,12 +481,51 @@ void ArbiterReorder::peqCallback(tlm_generic_payload &cbPayload, const tlm_phase
sc_time tDelay = tCK;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
// Early completion from initiator
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
else
{
threadIsBusy[threadId] = false;
}
else if (cbPhase == REQ_ARBITRATION)
{
pendingRequests[channelId].push(&cbPayload);
if (!channelIsBusy[channelId])
{
channelIsBusy[channelId] = true;
tlm_generic_payload &tPayload = *pendingRequests[channelId].front();
pendingRequests[channelId].pop();
tlm_phase tPhase = BEGIN_REQ;
sc_time tDelay = lastEndReq[channelId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
iSocket[static_cast<int>(channelId)]->nb_transport_fw(tPayload, tPhase, tDelay);
}
}
else if (cbPhase == RESP_ARBITRATION)
{
pendingResponses[threadId].insert(&cbPayload);
if (!threadIsBusy[threadId])
{
tlm_generic_payload &tPayload = **pendingResponses[threadId].begin();
if (DramExtension::getThreadPayloadID(tPayload) == nextThreadPayloadIDToReturn[threadId])
{
threadIsBusy[threadId] = true;
nextThreadPayloadIDToReturn[threadId]++;
pendingResponses[threadId].erase(pendingResponses[threadId].begin());
tlm_phase tPhase = BEGIN_RESP;
sc_time tDelay = lastEndResp[threadId] == sc_time_stamp() ? tCK : SC_ZERO_TIME;
tlm_sync_enum returnValue = tSocket[static_cast<int>(threadId)]->nb_transport_bw(tPayload, tPhase, tDelay);
// Early completion from initiator
if (returnValue == TLM_UPDATED)
payloadEventQueue.notify(tPayload, tPhase, tDelay);
}
}
}
else

View File

@@ -51,6 +51,9 @@
#include "AddressDecoder.h"
#include "../common/dramExtensions.h"
DECLARE_EXTENDED_PHASE(REQ_ARBITRATION);
DECLARE_EXTENDED_PHASE(RESP_ARBITRATION);
class Arbiter : public sc_module
{
public:

View File

@@ -124,11 +124,11 @@ QColor Phase::getColor(const TraceDrawingProperties &drawingProperties) const
break;
case ColorGrouping::Thread:
return ColorGenerator::getColor(static_cast<unsigned int>
(transaction->thread));
(transaction.lock()->thread));
break;
case ColorGrouping::Transaction:
default:
return ColorGenerator::getColor(transaction->id);
return ColorGenerator::getColor(transaction.lock()->id);
}
}

View File

@@ -80,7 +80,7 @@ protected:
ID id;
Timespan span;
traceTime clk;
std::shared_ptr<Transaction> transaction;
std::weak_ptr<Transaction> transaction;
std::vector<Timespan> spansOnCommandBus;
std::shared_ptr<Timespan> spanOnDataBus;
double hexagonHeight;

View File

@@ -909,6 +909,71 @@ def calculateMetrics(pathToTrace, selectedMetrics=[]):
connection.close()
return calculatedMetrics
def calculateMetricsFromFuncs(pathToTrace, selectedMetrics):
    """Calculate only the given metric functions for a trace database.

    Parameters:
        pathToTrace: path to the .tdb (SQLite) trace file.
        selectedMetrics: list of metric functions, each taking an open
            sqlite3 connection and returning the metric's value.
            If empty/None, falls back to calculateMetrics(), which
            evaluates every known metric.

    Returns:
        A list of (metric name, value) tuples.
    """
    # Fall back to the full metric set when no specific functions were
    # requested. The previous code rebuilt selectedMetrics as a list of
    # booleans here and then *called* each boolean as a function, which
    # would always raise TypeError.
    if not selectedMetrics:
        return calculateMetrics(pathToTrace)

    calculatedMetrics = []
    connection = sqlite3.connect(pathToTrace)
    mcconfig = MCConfig(connection)  # kept: mirrors calculateMetrics' setup

    print("================================")
    print("Calculating metrics for {0}".format(pathToTrace))
    print("Number of threads is {0}".format(len(getThreads(connection))))

    for metric in selectedMetrics:
        mres = metric(connection)
        mname = metric.__name__.replace("_", " ")
        res = (mname, mres)
        if metric.__name__ == "bank_overlap_ratio":
            # bank_overlap_ratio returns a comma-separated string with one
            # value per count of simultaneously active banks; expand it
            # into one (name, float) entry per bank count.
            values = mres.split(",")
            for nbanks, v in enumerate(values):
                name = mname + " (" + str(nbanks) + " banks active)"
                calculatedMetrics.append((name, float(v)))
        else:
            calculatedMetrics.append(res)
        print("{0}: {1}".format(res[0], res[1]))
    # refreshMissDecision(connection, calculatedMetrics)
    connection.close()
    return calculatedMetrics
import argparse

if __name__ == "__main__":
    # Only non-threaded metrics are exposed for selection through the
    # command line; threaded metrics require a thread argument.
    parser = argparse.ArgumentParser(description="Calculates metrics of a given .tdb file")
    parser.add_argument('path', type=str, help="The path to the .tdb file to be used")

    # Register one --<metric_name> flag per known metric function so the
    # user can cherry-pick which metrics to compute.
    dic_metric_functions = {}
    for m in metrics:
        parser.add_argument("--" + m.__name__, action='store_true')
        dic_metric_functions[m.__name__] = m

    arg_namespace = parser.parse_args(sys.argv[1:])

    # Collect the metric functions whose flags were set ('path' is the
    # only non-flag entry in the namespace).
    selected_metrics = [dic_metric_functions[k]
                        for k, v in arg_namespace.__dict__.items()
                        if k != 'path' and v]

    # NOTE(review): the merged version also ran
    #   path = sys.argv[1]; calculateMetrics(path)
    # unconditionally before parsing, computing every metric twice and
    # ignoring the flags; that leftover pre-merge invocation is removed.
    if not selected_metrics:
        # No specific metric requested: compute everything.
        calculateMetrics(arg_namespace.path)
    else:
        calculateMetricsFromFuncs(arg_namespace.path, selected_metrics)