mem: Fix a bug in the cache port flow control
This patch fixes a bug in the cache port where the retry flag was reset too early, allowing new requests to arrive before the retry was actually sent (even though the retry event was already scheduled). This caused a deadlock in the interactions with the O3 LSQ. The patch fixes the underlying issue by shifting the resetting of the flag into the event handler that also calls sendRetry(). The patch also tidies up the flow control in recvTimingReq and ensures that we also check whether we already have a retry outstanding.
This commit is contained in:
11
src/mem/cache/base.cc
vendored
11
src/mem/cache/base.cc
vendored
@@ -106,13 +106,20 @@ BaseCache::CacheSlavePort::clearBlocked()
|
||||
DPRINTF(CachePort, "Cache port %s accepting new requests\n", name());
|
||||
blocked = false;
|
||||
if (mustSendRetry) {
|
||||
DPRINTF(CachePort, "Cache port %s sending retry\n", name());
|
||||
mustSendRetry = false;
|
||||
// @TODO: need to find a better time (next bus cycle?)
|
||||
owner.schedule(sendRetryEvent, curTick() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
BaseCache::CacheSlavePort::processSendRetry()
|
||||
{
|
||||
DPRINTF(CachePort, "Cache port %s sending retry\n", name());
|
||||
|
||||
// reset the flag and call retry
|
||||
mustSendRetry = false;
|
||||
sendRetry();
|
||||
}
|
||||
|
||||
void
|
||||
BaseCache::init()
|
||||
|
||||
5
src/mem/cache/base.hh
vendored
5
src/mem/cache/base.hh
vendored
@@ -182,7 +182,10 @@ class BaseCache : public MemObject
|
||||
|
||||
private:
|
||||
|
||||
EventWrapper<SlavePort, &SlavePort::sendRetry> sendRetryEvent;
|
||||
void processSendRetry();
|
||||
|
||||
EventWrapper<CacheSlavePort,
|
||||
&CacheSlavePort::processSendRetry> sendRetryEvent;
|
||||
|
||||
};
|
||||
|
||||
|
||||
27
src/mem/cache/cache_impl.hh
vendored
27
src/mem/cache/cache_impl.hh
vendored
@@ -1937,16 +1937,27 @@ template<class TagStore>
|
||||
bool
|
||||
Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
// always let inhibited requests through even if blocked
|
||||
if (!pkt->memInhibitAsserted() && blocked) {
|
||||
assert(!cache->system->bypassCaches());
|
||||
DPRINTF(Cache,"Scheduling a retry while blocked\n");
|
||||
mustSendRetry = true;
|
||||
return false;
|
||||
assert(!cache->system->bypassCaches());
|
||||
|
||||
bool success = false;
|
||||
|
||||
// always let inhibited requests through, even if blocked
|
||||
if (pkt->memInhibitAsserted()) {
|
||||
// this should always succeed
|
||||
success = cache->recvTimingReq(pkt);
|
||||
assert(success);
|
||||
} else if (blocked || mustSendRetry) {
|
||||
// either already committed to send a retry, or blocked
|
||||
success = false;
|
||||
} else {
|
||||
// for now this should always succeed
|
||||
success = cache->recvTimingReq(pkt);
|
||||
assert(success);
|
||||
}
|
||||
|
||||
cache->recvTimingReq(pkt);
|
||||
return true;
|
||||
// remember if we have to retry
|
||||
mustSendRetry = !success;
|
||||
return success;
|
||||
}
|
||||
|
||||
template<class TagStore>
|
||||
|
||||
Reference in New Issue
Block a user