MEM: Separate requests and responses for timing accesses
This patch moves send/recvTiming and send/recvTimingSnoop from the Port base class to the MasterPort and SlavePort, and also splits them into separate member functions for requests and responses: send/recvTimingReq, send/recvTimingResp, and send/recvTimingSnoopReq, send/recvTimingSnoopResp. A master port sends requests and receives responses, and also receives snoop requests and sends snoop responses. A slave port has the reciprocal behaviour as it receives requests and sends responses, and sends snoop requests and receives snoop responses. For all MemObjects that have only master ports or slave ports (but not both), e.g. a CPU, or a PIO device, this patch merely adds more clarity to what kind of access is taking place. For example, a CPU port used to call sendTiming, and will now call sendTimingReq. Similarly, a response previously came back through recvTiming, which is now recvTimingResp. For the modules that have both master and slave ports, e.g. the bus, the behaviour previously relied on branches based on pkt->isRequest(), and this is now replaced with a direct call to the appropriate member function depending on the type of access. Please note that send/recvRetry is still shared by all the timing accessors and remains in the Port base class for now (to maintain the current bus functionality and avoid changing the statistics of all regressions). The packet queue is split into a MasterPort and SlavePort version to facilitate the use of the new timing accessors. All uses of the PacketQueue are updated accordingly. With this patch, the type of packet (request or response) is now well defined for each type of access, and asserts on pkt->isRequest() and pkt->isResponse() are now moved to the appropriate send member functions. It is also worth noting that sendTimingSnoopReq no longer returns a boolean, as the semantics do not allow snoop requests to be rejected or stalled. All these assumptions are now explicitly part of the port interface itself.
This commit is contained in:
4
src/mem/cache/base.hh
vendored
4
src/mem/cache/base.hh
vendored
@@ -148,7 +148,7 @@ class BaseCache : public MemObject
|
||||
protected:
|
||||
|
||||
CacheMasterPort(const std::string &_name, BaseCache *_cache,
|
||||
PacketQueue &_queue) :
|
||||
MasterPacketQueue &_queue) :
|
||||
QueuedMasterPort(_name, _cache, _queue)
|
||||
{ }
|
||||
|
||||
@@ -196,7 +196,7 @@ class BaseCache : public MemObject
|
||||
const std::string &_label);
|
||||
|
||||
/** A normal packet queue used to store responses. */
|
||||
PacketQueue queue;
|
||||
SlavePacketQueue queue;
|
||||
|
||||
bool blocked;
|
||||
|
||||
|
||||
14
src/mem/cache/cache.hh
vendored
14
src/mem/cache/cache.hh
vendored
@@ -90,9 +90,9 @@ class Cache : public BaseCache
|
||||
|
||||
protected:
|
||||
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt);
|
||||
virtual bool recvTimingSnoopResp(PacketPtr pkt);
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingReq(PacketPtr pkt);
|
||||
|
||||
virtual Tick recvAtomic(PacketPtr pkt);
|
||||
|
||||
@@ -116,7 +116,7 @@ class Cache : public BaseCache
|
||||
* current MSHR status. This queue has a pointer to our specific
|
||||
* cache implementation and is used by the MemSidePort.
|
||||
*/
|
||||
class MemSidePacketQueue : public PacketQueue
|
||||
class MemSidePacketQueue : public MasterPacketQueue
|
||||
{
|
||||
|
||||
protected:
|
||||
@@ -125,9 +125,9 @@ class Cache : public BaseCache
|
||||
|
||||
public:
|
||||
|
||||
MemSidePacketQueue(Cache<TagStore> &cache, Port &port,
|
||||
MemSidePacketQueue(Cache<TagStore> &cache, MasterPort &port,
|
||||
const std::string &label) :
|
||||
PacketQueue(cache, port, label), cache(cache) { }
|
||||
MasterPacketQueue(cache, port, label), cache(cache) { }
|
||||
|
||||
/**
|
||||
* Override the normal sendDeferredPacket and do not only
|
||||
@@ -154,9 +154,9 @@ class Cache : public BaseCache
|
||||
|
||||
protected:
|
||||
|
||||
virtual bool recvTimingSnoop(PacketPtr pkt);
|
||||
virtual void recvTimingSnoopReq(PacketPtr pkt);
|
||||
|
||||
virtual bool recvTiming(PacketPtr pkt);
|
||||
virtual bool recvTimingResp(PacketPtr pkt);
|
||||
|
||||
virtual Tick recvAtomicSnoop(PacketPtr pkt);
|
||||
|
||||
|
||||
28
src/mem/cache/cache_impl.hh
vendored
28
src/mem/cache/cache_impl.hh
vendored
@@ -417,7 +417,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
|
||||
Packet *snoopPkt = new Packet(pkt, true); // clear flags
|
||||
snoopPkt->setExpressSnoop();
|
||||
snoopPkt->assertMemInhibit();
|
||||
memSidePort->sendTiming(snoopPkt);
|
||||
memSidePort->sendTimingReq(snoopPkt);
|
||||
// main memory will delete snoopPkt
|
||||
}
|
||||
// since we're the official target but we aren't responding,
|
||||
@@ -1181,7 +1181,7 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
|
||||
Packet snoopPkt(pkt, true); // clear flags
|
||||
snoopPkt.setExpressSnoop();
|
||||
snoopPkt.senderState = new ForwardResponseRecord(pkt, this);
|
||||
cpuSidePort->sendTimingSnoop(&snoopPkt);
|
||||
cpuSidePort->sendTimingSnoopReq(&snoopPkt);
|
||||
if (snoopPkt.memInhibitAsserted()) {
|
||||
// cache-to-cache response from some upper cache
|
||||
assert(!alreadyResponded);
|
||||
@@ -1336,11 +1336,9 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
|
||||
|
||||
template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::CpuSidePort::recvTimingSnoop(PacketPtr pkt)
|
||||
Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
|
||||
{
|
||||
// Express snoop responses from master to slave, e.g., from L1 to L2
|
||||
assert(pkt->isResponse());
|
||||
|
||||
cache->timingAccess(pkt);
|
||||
return true;
|
||||
}
|
||||
@@ -1492,7 +1490,7 @@ Cache<TagStore>::getTimingPacket()
|
||||
PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
|
||||
snoop_pkt->setExpressSnoop();
|
||||
snoop_pkt->senderState = mshr;
|
||||
cpuSidePort->sendTimingSnoop(snoop_pkt);
|
||||
cpuSidePort->sendTimingSnoopReq(snoop_pkt);
|
||||
|
||||
if (snoop_pkt->memInhibitAsserted()) {
|
||||
markInService(mshr, snoop_pkt);
|
||||
@@ -1557,9 +1555,8 @@ Cache<TagStore>::CpuSidePort::getAddrRanges()
|
||||
|
||||
template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
|
||||
Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// always let inhibited requests through even if blocked
|
||||
if (!pkt->memInhibitAsserted() && blocked) {
|
||||
DPRINTF(Cache,"Scheduling a retry while blocked\n");
|
||||
@@ -1575,7 +1572,6 @@ template<class TagStore>
|
||||
Tick
|
||||
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// atomic request
|
||||
return cache->atomicAccess(pkt);
|
||||
}
|
||||
@@ -1584,7 +1580,6 @@ template<class TagStore>
|
||||
void
|
||||
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// functional request
|
||||
cache->functionalAccess(pkt, true);
|
||||
}
|
||||
@@ -1605,7 +1600,7 @@ CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
|
||||
|
||||
template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
|
||||
Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
|
||||
{
|
||||
// this needs to be fixed so that the cache updates the mshr and sends the
|
||||
// packet back out on the link, but it probably won't happen so until this
|
||||
@@ -1613,27 +1608,23 @@ Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
|
||||
if (pkt->wasNacked())
|
||||
panic("Need to implement cache resending nacked packets!\n");
|
||||
|
||||
assert(pkt->isResponse());
|
||||
cache->handleResponse(pkt);
|
||||
return true;
|
||||
}
|
||||
|
||||
// Express snooping requests to memside port
|
||||
template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::MemSidePort::recvTimingSnoop(PacketPtr pkt)
|
||||
void
|
||||
Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
|
||||
{
|
||||
// handle snooping requests
|
||||
assert(pkt->isRequest());
|
||||
cache->snoopTiming(pkt);
|
||||
return true;
|
||||
}
|
||||
|
||||
template<class TagStore>
|
||||
Tick
|
||||
Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// atomic snoop
|
||||
return cache->snoopAtomic(pkt);
|
||||
}
|
||||
@@ -1642,7 +1633,6 @@ template<class TagStore>
|
||||
void
|
||||
Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
|
||||
{
|
||||
assert(pkt->isRequest());
|
||||
// functional snoop (note that in contrast to atomic we don't have
|
||||
// a specific functionalSnoop method, as they have the same
|
||||
// behaviour regardless)
|
||||
@@ -1668,7 +1658,7 @@ Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
|
||||
} else {
|
||||
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
|
||||
|
||||
waitingOnRetry = !port.sendTiming(pkt);
|
||||
waitingOnRetry = !masterPort.sendTimingReq(pkt);
|
||||
|
||||
if (waitingOnRetry) {
|
||||
DPRINTF(CachePort, "now waiting on a retry\n");
|
||||
|
||||
Reference in New Issue
Block a user