sim: Add a system-global option to bypass caches
Virtualized CPUs and the fastmem mode of the atomic CPU require direct access to physical memory. We currently require caches to be disabled when using them to prevent chaos. This is not ideal when switching between hardware virtualized CPUs and other CPU models as it would require a configuration change on each switch. This changeset introduces a new version of the atomic memory mode, 'atomic_noncaching', where memory accesses are inserted into the memory system as atomic accesses, but bypass caches. To make memory mode tests cleaner, the following methods are added to the System class: * isAtomicMode() -- True if the memory mode is 'atomic' or 'direct'. * isTimingMode() -- True if the memory mode is 'timing'. * bypassCaches() -- True if caches should be bypassed. The old getMemoryMode() and setMemoryMode() methods should never be used from the C++ world anymore.
This commit is contained in:
@@ -40,7 +40,9 @@
|
||||
# Andreas Hansson
|
||||
|
||||
from MemObject import MemObject
|
||||
from System import System
|
||||
from m5.params import *
|
||||
from m5.proxy import *
|
||||
|
||||
class BaseBus(MemObject):
|
||||
type = 'BaseBus'
|
||||
@@ -72,3 +74,5 @@ class NoncoherentBus(BaseBus):
|
||||
class CoherentBus(BaseBus):
|
||||
type = 'CoherentBus'
|
||||
cxx_header = "mem/coherent_bus.hh"
|
||||
|
||||
system = Param.System(Parent.any, "System that the bus belongs to.")
|
||||
|
||||
29
src/mem/cache/cache_impl.hh
vendored
29
src/mem/cache/cache_impl.hh
vendored
@@ -390,6 +390,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
|
||||
// must be cache-to-cache response from upper to lower level
|
||||
ForwardResponseRecord *rec =
|
||||
dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
|
||||
assert(!system->bypassCaches());
|
||||
|
||||
if (rec == NULL) {
|
||||
assert(pkt->cmd == MemCmd::HardPFResp);
|
||||
@@ -409,6 +410,12 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
|
||||
|
||||
assert(pkt->isRequest());
|
||||
|
||||
// Just forward the packet if caches are disabled.
|
||||
if (system->bypassCaches()) {
|
||||
memSidePort->sendTimingReq(pkt);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (pkt->memInhibitAsserted()) {
|
||||
DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
|
||||
pkt->getAddr());
|
||||
@@ -629,6 +636,10 @@ Cache<TagStore>::atomicAccess(PacketPtr pkt)
|
||||
// @TODO: make this a parameter
|
||||
bool last_level_cache = false;
|
||||
|
||||
// Forward the request if the system is in cache bypass mode.
|
||||
if (system->bypassCaches())
|
||||
return memSidePort->sendAtomic(pkt);
|
||||
|
||||
if (pkt->memInhibitAsserted()) {
|
||||
assert(!pkt->req->isUncacheable());
|
||||
// have to invalidate ourselves and any lower caches even if
|
||||
@@ -744,6 +755,17 @@ template<class TagStore>
|
||||
void
|
||||
Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
|
||||
{
|
||||
if (system->bypassCaches()) {
|
||||
// Packets from the memory side are snoop requests and
|
||||
// shouldn't happen in bypass mode.
|
||||
assert(fromCpuSide);
|
||||
|
||||
// The cache should be flushed if we are in cache bypass mode,
|
||||
// so we don't need to check if we need to update anything.
|
||||
memSidePort->sendFunctional(pkt);
|
||||
return;
|
||||
}
|
||||
|
||||
Addr blk_addr = blockAlign(pkt->getAddr());
|
||||
BlkType *blk = tags->findBlock(pkt->getAddr());
|
||||
MSHR *mshr = mshrQueue.findMatch(blk_addr);
|
||||
@@ -1354,6 +1376,9 @@ template<class TagStore>
|
||||
void
|
||||
Cache<TagStore>::snoopTiming(PacketPtr pkt)
|
||||
{
|
||||
// Snoops shouldn't happen when bypassing caches
|
||||
assert(!system->bypassCaches());
|
||||
|
||||
// Note that some deferred snoops don't have requests, since the
|
||||
// original access may have already completed
|
||||
if ((pkt->req && pkt->req->isUncacheable()) ||
|
||||
@@ -1438,6 +1463,9 @@ template<class TagStore>
|
||||
Cycles
|
||||
Cache<TagStore>::snoopAtomic(PacketPtr pkt)
|
||||
{
|
||||
// Snoops shouldn't happen when bypassing caches
|
||||
assert(!system->bypassCaches());
|
||||
|
||||
if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
|
||||
// Can't get a hit on an uncacheable address
|
||||
// Revisit this for multi level coherence
|
||||
@@ -1683,6 +1711,7 @@ Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
|
||||
{
|
||||
// always let inhibited requests through even if blocked
|
||||
if (!pkt->memInhibitAsserted() && blocked) {
|
||||
assert(!cache->system->bypassCaches());
|
||||
DPRINTF(Cache,"Scheduling a retry while blocked\n");
|
||||
mustSendRetry = true;
|
||||
return false;
|
||||
|
||||
@@ -52,11 +52,13 @@
|
||||
#include "debug/BusAddrRanges.hh"
|
||||
#include "debug/CoherentBus.hh"
|
||||
#include "mem/coherent_bus.hh"
|
||||
#include "sim/system.hh"
|
||||
|
||||
CoherentBus::CoherentBus(const CoherentBusParams *p)
|
||||
: BaseBus(p), reqLayer(*this, ".reqLayer", p->clock),
|
||||
respLayer(*this, ".respLayer", p->clock),
|
||||
snoopRespLayer(*this, ".snoopRespLayer", p->clock)
|
||||
snoopRespLayer(*this, ".snoopRespLayer", p->clock),
|
||||
system(p->system)
|
||||
{
|
||||
// create the ports based on the size of the master and slave
|
||||
// vector ports, and the presence of the default port, the ports
|
||||
@@ -137,7 +139,7 @@ CoherentBus::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
|
||||
Tick packetFinishTime = is_express_snoop ? 0 : pkt->finishTime;
|
||||
|
||||
// uncacheable requests need never be snooped
|
||||
if (!pkt->req->isUncacheable()) {
|
||||
if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
|
||||
// the packet is a memory-mapped request and should be
|
||||
// broadcasted to our snoopers but the source
|
||||
forwardTiming(pkt, slave_port_id);
|
||||
@@ -323,6 +325,9 @@ CoherentBus::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id)
|
||||
void
|
||||
CoherentBus::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id)
|
||||
{
|
||||
// snoops should only happen if the system isn't bypassing caches
|
||||
assert(!system->bypassCaches());
|
||||
|
||||
for (SlavePortIter s = snoopPorts.begin(); s != snoopPorts.end(); ++s) {
|
||||
SlavePort *p = *s;
|
||||
// we could have gotten this request from a snooping master
|
||||
@@ -357,7 +362,7 @@ CoherentBus::recvAtomic(PacketPtr pkt, PortID slave_port_id)
|
||||
Tick snoop_response_latency = 0;
|
||||
|
||||
// uncacheable requests need never be snooped
|
||||
if (!pkt->req->isUncacheable()) {
|
||||
if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
|
||||
// forward to all snoopers but the source
|
||||
std::pair<MemCmd, Tick> snoop_result =
|
||||
forwardAtomic(pkt, slave_port_id);
|
||||
@@ -414,6 +419,9 @@ CoherentBus::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id)
|
||||
MemCmd snoop_response_cmd = MemCmd::InvalidCmd;
|
||||
Tick snoop_response_latency = 0;
|
||||
|
||||
// snoops should only happen if the system isn't bypassing caches
|
||||
assert(!system->bypassCaches());
|
||||
|
||||
for (SlavePortIter s = snoopPorts.begin(); s != snoopPorts.end(); ++s) {
|
||||
SlavePort *p = *s;
|
||||
// we could have gotten this request from a snooping master
|
||||
@@ -458,7 +466,7 @@ CoherentBus::recvFunctional(PacketPtr pkt, PortID slave_port_id)
|
||||
}
|
||||
|
||||
// uncacheable requests need never be snooped
|
||||
if (!pkt->req->isUncacheable()) {
|
||||
if (!pkt->req->isUncacheable() && !system->bypassCaches()) {
|
||||
// forward to all snoopers but the source
|
||||
forwardFunctional(pkt, slave_port_id);
|
||||
}
|
||||
@@ -490,6 +498,9 @@ CoherentBus::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id)
|
||||
void
|
||||
CoherentBus::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
|
||||
{
|
||||
// snoops should only happen if the system isn't bypassing caches
|
||||
assert(!system->bypassCaches());
|
||||
|
||||
for (SlavePortIter s = snoopPorts.begin(); s != snoopPorts.end(); ++s) {
|
||||
SlavePort *p = *s;
|
||||
// we could have gotten this request from a snooping master
|
||||
|
||||
@@ -224,6 +224,12 @@ class CoherentBus : public BaseBus
|
||||
*/
|
||||
std::set<RequestPtr> outstandingReq;
|
||||
|
||||
/**
|
||||
* Keep a pointer to the system to allow querying memory system
|
||||
* properties.
|
||||
*/
|
||||
System *system;
|
||||
|
||||
/** Function called by the port when the bus is receiving a Timing
|
||||
request packet.*/
|
||||
virtual bool recvTimingReq(PacketPtr pkt, PortID slave_port_id);
|
||||
|
||||
Reference in New Issue
Block a user