MEM: Split SimpleTimingPort into PacketQueue and ports

This patch decouples the queueing and the port interactions to
simplify the introduction of the master and slave ports. By separating
the queueing functionality from the port itself, it becomes much
easier to distinguish between master and slave ports, and still retain
the queueing ability for both (without code duplication).

As part of the split into a PacketQueue and a port, there is now also
a hierarchy of two port classes, QueuedPort and SimpleTimingPort. The
QueuedPort is useful for ports that want to leave the transmission
of outgoing packets to the queue and is used by both
master and slave ports. The SimpleTimingPort inherits from the
QueuedPort and adds the implementation of recvTiming and recvFunctional
through recvAtomic.

The PioPort and MessagePort are cleaned up as part of the changes.

--HG--
rename : src/mem/tport.cc => src/mem/packet_queue.cc
rename : src/mem/tport.hh => src/mem/packet_queue.hh
This commit is contained in:
Andreas Hansson
2012-03-22 06:36:27 -04:00
parent fb395b56dd
commit c2d2ea99e3
17 changed files with 650 additions and 366 deletions

11
src/mem/cache/base.cc vendored
View File

@@ -54,18 +54,11 @@
using namespace std;
BaseCache::CacheMasterPort::CacheMasterPort(const std::string &_name,
BaseCache *_cache,
const std::string &_label)
: SimpleTimingPort(_name, _cache, _label)
{
}
BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
BaseCache *_cache,
const std::string &_label)
: SimpleTimingPort(_name, _cache, _label), blocked(false),
mustSendRetry(false), sendRetryEvent(this)
: QueuedPort(_name, _cache, queue), queue(*_cache, *this, _label),
blocked(false), mustSendRetry(false), sendRetryEvent(this)
{
}

35
src/mem/cache/base.hh vendored
View File

@@ -64,8 +64,8 @@
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/qport.hh"
#include "mem/request.hh"
#include "mem/tport.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
@@ -118,7 +118,7 @@ class BaseCache : public MemObject
* and the sendDeferredPacket of the timing port is modified to
* consider both the transmit list and the requests from the MSHR.
*/
class CacheMasterPort : public SimpleTimingPort
class CacheMasterPort : public QueuedPort
{
public:
@@ -131,22 +131,31 @@ class BaseCache : public MemObject
void requestBus(RequestCause cause, Tick time)
{
DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
schedSendEvent(time);
queue.schedSendEvent(time);
}
/**
* Schedule the transmissions of a response packet at a given
* point in time.
*
* @param pkt response packet
* @param when time to send the response
*/
void respond(PacketPtr pkt, Tick time) {
schedSendTiming(pkt, time);
queue.schedSendTiming(pkt, time);
}
protected:
CacheMasterPort(const std::string &_name, BaseCache *_cache,
const std::string &_label);
PacketQueue &_queue) :
QueuedPort(_name, _cache, _queue)
{ }
/**
* Memory-side port always snoops.
*
* return always true
* @return always true
*/
virtual bool isSnooping() { return true; }
};
@@ -159,7 +168,7 @@ class BaseCache : public MemObject
* incoming requests. If blocked, the port will issue a retry once
* unblocked.
*/
class CacheSlavePort : public SimpleTimingPort
class CacheSlavePort : public QueuedPort
{
public:
@@ -170,8 +179,15 @@ class BaseCache : public MemObject
/** Return to normal operation and accept new requests. */
void clearBlocked();
/**
* Schedule the transmissions of a response packet at a given
* point in time.
*
* @param pkt response packet
* @param when time to send the response
*/
void respond(PacketPtr pkt, Tick time) {
schedSendTiming(pkt, time);
queue.schedSendTiming(pkt, time);
}
protected:
@@ -179,6 +195,9 @@ class BaseCache : public MemObject
CacheSlavePort(const std::string &_name, BaseCache *_cache,
const std::string &_label);
/** A normal packet queue used to store responses. */
PacketQueue queue;
bool blocked;
bool mustSendRetry;

View File

@@ -108,6 +108,34 @@ class Cache : public BaseCache
};
/**
* Override the default behaviour of sendDeferredPacket to enable
* the memory-side cache port to also send requests based on the
* current MSHR status. This queue has a pointer to our specific
* cache implementation and is used by the MemSidePort.
*/
class MemSidePacketQueue : public PacketQueue
{
protected:
Cache<TagStore> &cache;
public:
MemSidePacketQueue(Cache<TagStore> &cache, Port &port,
const std::string &label) :
PacketQueue(cache, port, label), cache(cache) { }
/**
* Override the normal sendDeferredPacket and do not only
* consider the transmit list (used for responses), but also
* requests.
*/
virtual void sendDeferredPacket();
};
/**
* The memory-side port extends the base cache master port with
* access functions for functional, atomic and timing snoops.
@@ -116,6 +144,9 @@ class Cache : public BaseCache
{
private:
/** The cache-specific queue. */
MemSidePacketQueue _queue;
// a pointer to our specific cache implementation
Cache<TagStore> *cache;
@@ -134,11 +165,6 @@ class Cache : public BaseCache
MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label);
/**
* Overload sendDeferredPacket of SimpleTimingPort.
*/
virtual void sendDeferredPacket();
};
/** Tag and data Storage */

View File

@@ -1646,7 +1646,7 @@ Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
template<class TagStore>
void
Cache<TagStore>::MemSidePort::sendDeferredPacket()
Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
{
// if we have a response packet waiting we have to start with that
if (deferredPacketReady()) {
@@ -1654,7 +1654,7 @@ Cache<TagStore>::MemSidePort::sendDeferredPacket()
trySendTiming();
} else {
// check for request packets (requests & writebacks)
PacketPtr pkt = cache->getTimingPacket();
PacketPtr pkt = cache.getTimingPacket();
if (pkt == NULL) {
// can happen if e.g. we attempt a writeback and fail, but
// before the retry, the writeback is eliminated because
@@ -1663,7 +1663,7 @@ Cache<TagStore>::MemSidePort::sendDeferredPacket()
} else {
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
waitingOnRetry = !sendTiming(pkt);
waitingOnRetry = !port.sendTiming(pkt);
if (waitingOnRetry) {
DPRINTF(CachePort, "now waiting on a retry\n");
@@ -1679,7 +1679,7 @@ Cache<TagStore>::MemSidePort::sendDeferredPacket()
// care about this packet and might override it before
// it gets retried
} else {
cache->markInService(mshr, pkt);
cache.markInService(mshr, pkt);
}
}
}
@@ -1688,7 +1688,7 @@ Cache<TagStore>::MemSidePort::sendDeferredPacket()
// next send, not only looking at the response transmit list, but
// also considering when the next MSHR is ready
if (!waitingOnRetry) {
scheduleSend(cache->nextMSHRReadyTime());
scheduleSend(cache.nextMSHRReadyTime());
}
}
@@ -1696,6 +1696,7 @@ template<class TagStore>
Cache<TagStore>::
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label)
: BaseCache::CacheMasterPort(_name, _cache, _label), cache(_cache)
: BaseCache::CacheMasterPort(_name, _cache, _queue),
_queue(*_cache, *this, _label), cache(_cache)
{
}