misc: BaseCPU using ArchMMU instead of ArchDTB/ArchITB

With this commit we replace every TLB pointer stored in the
cpu model with a BaseMMU pointer.

JIRA: https://gem5.atlassian.net/browse/GEM5-790

Change-Id: I4932a32f68582b25cd252b5420b54d6a40ee15b8
Signed-off-by: Giacomo Travaglini <giacomo.travaglini@arm.com>
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/34976
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
Maintainer: Jason Lowe-Power <power.jg@gmail.com>
Tested-by: kokoro <noreply+kokoro@google.com>
This commit is contained in:
Giacomo Travaglini
2019-12-13 00:18:47 +00:00
parent 85a36581d4
commit 330a5f7bad
48 changed files with 230 additions and 183 deletions

View File

@@ -364,8 +364,8 @@ def addCommonOptions(parser):
parser.add_option("--stats-root", action="append", default=[], help=
"If given, dump only stats of objects under the given SimObject. "
"SimObjects are identified with Python notation as in: "
"system.cpu[0].dtb. All elements of an array can be selected at "
"once with: system.cpu[:].dtb. If given multiple times, dump stats "
"system.cpu[0].mmu. All elements of an array can be selected at "
"once with: system.cpu[:].mmu. If given multiple times, dump stats "
"that are present under any of the roots. If not given, dump all "
"stats. "
)

View File

@@ -1,4 +1,4 @@
# Copyright (c) 2014-2017 ARM Limited
# Copyright (c) 2014-2017, 2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -1337,6 +1337,10 @@ class HPI_DTB(ArmDTB):
class HPI_ITB(ArmITB):
size = 256
class HPI_MMU(ArmMMU):
itb = HPI_ITB()
dtb = HPI_DTB()
class HPI_WalkCache(Cache):
data_latency = 4
tag_latency = 4
@@ -1443,8 +1447,7 @@ class HPI(MinorCPU):
branchPred = HPI_BP()
itb = HPI_ITB()
dtb = HPI_DTB()
mmu = HPI_MMU()
__all__ = [
"HPI_BP",

View File

@@ -551,8 +551,8 @@ for i in range(options.num_cpus):
system.cpu[i].interrupts[0].int_master = system.piobus.slave
system.cpu[i].interrupts[0].int_slave = system.piobus.master
if fast_forward:
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
system.cpu[i].mmu.connectWalkerPorts(
ruby_port.slave, ruby_port.slave)
# attach CU ports to Ruby
# Because of the peculiarities of the CP core, you may have 1 CPU but 2

View File

@@ -179,7 +179,8 @@ class CpuCluster(SubSystem):
int_cls = ArmPPI if pint < 32 else ArmSPI
for isa in cpu.isa:
isa.pmu = ArmPMU(interrupt=int_cls(num=pint))
isa.pmu.addArchEvents(cpu=cpu, itb=cpu.itb, dtb=cpu.dtb,
isa.pmu.addArchEvents(cpu=cpu,
itb=cpu.mmu.itb, dtb=cpu.mmu.dtb,
icache=getattr(cpu, 'icache', None),
dcache=getattr(cpu, 'dcache', None),
l2cache=getattr(self, 'l2', None))

View File

@@ -175,9 +175,9 @@ def build_test_system(np):
cpu.icache_port = test_sys.ruby._cpu_ports[i].slave
cpu.dcache_port = test_sys.ruby._cpu_ports[i].slave
if buildEnv['TARGET_ISA'] in ("x86", "arm"):
cpu.itb.walker.port = test_sys.ruby._cpu_ports[i].slave
cpu.dtb.walker.port = test_sys.ruby._cpu_ports[i].slave
cpu.mmu.connectWalkerPorts(
test_sys.ruby._cpu_ports[i].slave,
test_sys.ruby._cpu_ports[i].slave)
if buildEnv['TARGET_ISA'] in "x86":
cpu.interrupts[0].pio = test_sys.ruby._cpu_ports[i].master

View File

@@ -266,8 +266,8 @@ if options.ruby:
system.cpu[i].interrupts[0].pio = ruby_port.master
system.cpu[i].interrupts[0].int_master = ruby_port.slave
system.cpu[i].interrupts[0].int_slave = ruby_port.master
system.cpu[i].itb.walker.port = ruby_port.slave
system.cpu[i].dtb.walker.port = ruby_port.slave
system.cpu[i].mmu.connectWalkerPorts(
ruby_port.slave, ruby_port.slave)
else:
MemClass = Simulation.setMemClass(options)
system.membus = SystemXBar()

View File

@@ -107,14 +107,13 @@ class MyCacheSystem(RubySystem):
for i,cpu in enumerate(cpus):
cpu.icache_port = self.sequencers[i].slave
cpu.dcache_port = self.sequencers[i].slave
cpu.mmu.connectWalkerPorts(
self.sequencers[i].slave, self.sequencers[i].slave)
isa = buildEnv['TARGET_ISA']
if isa == 'x86':
cpu.interrupts[0].pio = self.sequencers[i].master
cpu.interrupts[0].int_master = self.sequencers[i].slave
cpu.interrupts[0].int_slave = self.sequencers[i].master
if isa == 'x86' or isa == 'arm':
cpu.itb.walker.port = self.sequencers[i].slave
cpu.dtb.walker.port = self.sequencers[i].slave
class L1Cache(L1Cache_Controller):

View File

@@ -105,14 +105,13 @@ class MyCacheSystem(RubySystem):
for i,cpu in enumerate(cpus):
cpu.icache_port = self.sequencers[i].slave
cpu.dcache_port = self.sequencers[i].slave
cpu.mmu.connectWalkerPorts(
self.sequencers[i].slave, self.sequencers[i].slave)
isa = buildEnv['TARGET_ISA']
if isa == 'x86':
cpu.interrupts[0].pio = self.sequencers[i].master
cpu.interrupts[0].int_master = self.sequencers[i].slave
cpu.interrupts[0].int_slave = self.sequencers[i].master
if isa == 'x86' or isa == 'arm':
cpu.itb.walker.port = self.sequencers[i].slave
cpu.dtb.walker.port = self.sequencers[i].slave
class L1Cache(L1Cache_Controller):

View File

@@ -44,3 +44,11 @@ class ArmMMU(BaseMMU):
cxx_header = 'arch/arm/mmu.hh'
itb = ArmITB()
dtb = ArmDTB()
@classmethod
def walkerPorts(cls):
return ["mmu.itb.walker.port", "mmu.dtb.walker.port"]
def connectWalkerPorts(self, iport, dport):
self.itb.walker.port = iport
self.dtb.walker.port = dport

View File

@@ -36,10 +36,10 @@ namespace FastModel
{
CortexA76TC::CortexA76TC(::BaseCPU *cpu, int id, System *system,
::BaseTLB *dtb, ::BaseTLB *itb, ::BaseISA *isa,
::BaseMMU *mmu, ::BaseISA *isa,
iris::IrisConnectionInterface *iris_if,
const std::string &iris_path) :
ThreadContext(cpu, id, system, dtb, itb, isa, iris_if, iris_path)
ThreadContext(cpu, id, system, mmu, isa, iris_if, iris_path)
{}
bool

View File

@@ -48,7 +48,7 @@ class CortexA76TC : public Iris::ThreadContext
public:
CortexA76TC(::BaseCPU *cpu, int id, System *system,
::BaseTLB *dtb, ::BaseTLB *itb, ::BaseISA *isa,
::BaseMMU *mmu, ::BaseISA *isa,
iris::IrisConnectionInterface *iris_if,
const std::string &iris_path);

View File

@@ -90,8 +90,7 @@ class IrisBaseCPU(BaseCPU):
thread_paths = VectorParam.String(
"Sub-paths to elements in the EVS which support a thread context")
dtb = IrisTLB()
itb = IrisTLB()
mmu = IrisMMU()
def createThreads(self):
if len(self.isa) == 0:

View File

@@ -139,7 +139,7 @@ class CPU : public Iris::BaseCPU
for (const std::string &sub_path: params.thread_paths) {
std::string path = parent_path + "." + sub_path;
auto id = thread_id++;
auto *tc = new TC(this, id, sys, params.dtb, params.itb,
auto *tc = new TC(this, id, sys, params.mmu,
params.isa[id], iris_if, path);
threadContexts.push_back(tc);
}

View File

@@ -304,10 +304,10 @@ ThreadContext::semihostingEvent(
}
ThreadContext::ThreadContext(
BaseCPU *cpu, int id, System *system, ::BaseTLB *dtb, ::BaseTLB *itb,
BaseCPU *cpu, int id, System *system, ::BaseMMU *mmu,
BaseISA *isa, iris::IrisConnectionInterface *iris_if,
const std::string &iris_path) :
_cpu(cpu), _threadId(id), _system(system), _dtb(dtb), _itb(itb), _isa(isa),
_cpu(cpu), _threadId(id), _system(system), _mmu(mmu), _isa(isa),
_irisPath(iris_path), vecRegs(ArmISA::NumVecRegs),
vecPredRegs(ArmISA::NumVecPredRegs),
comInstEventQueue("instruction-based event queue"),

View File

@@ -57,8 +57,7 @@ class ThreadContext : public ::ThreadContext
int _threadId;
ContextID _contextId;
System *_system;
::BaseTLB *_dtb;
::BaseTLB *_itb;
::BaseMMU *_mmu;
::BaseISA *_isa;
std::string _irisPath;
@@ -168,7 +167,7 @@ class ThreadContext : public ::ThreadContext
public:
ThreadContext(::BaseCPU *cpu, int id, System *system,
::BaseTLB *dtb, ::BaseTLB *itb, ::BaseISA *isa,
::BaseMMU *mmu, ::BaseISA *isa,
iris::IrisConnectionInterface *iris_if,
const std::string &iris_path);
virtual ~ThreadContext();
@@ -202,6 +201,12 @@ class ThreadContext : public ::ThreadContext
{
return _dtb;
}
BaseMMU *
getMMUPtr() override
{
return _mmu;
}
CheckerCPU *getCheckerCpuPtr() override { return nullptr; }
ArmISA::Decoder *
getDecoderPtr() override

View File

@@ -45,3 +45,27 @@ class BaseMMU(SimObject):
cxx_header = "arch/generic/mmu.hh"
itb = Param.BaseTLB("Instruction TLB")
dtb = Param.BaseTLB("Data TLB")
@classmethod
def walkerPorts(cls):
# This classmethod is used by the BaseCPU. It should return
# a list of strings: the table walker ports to be assigned
# to the _cached_ports variable. The method should be removed once
# we remove the _cached_ports methodology of composing
# cache hierarchies
return []
def connectWalkerPorts(self, iport, dport):
"""
Connect the instruction and data table walkers
to the ports passed as arguments.
This default implementation does nothing, so that ISAs
which do not implement a table walker are still supported;
an ISA-specific MMU should override this method.
:param iport: Port to be connected to the instruction
table walker port
:param dport: Port to be connected to the data
table walker port
"""
pass

View File

@@ -44,3 +44,11 @@ class RiscvMMU(BaseMMU):
cxx_header = 'arch/riscv/mmu.hh'
itb = RiscvTLB()
dtb = RiscvTLB()
@classmethod
def walkerPorts(cls):
return ["mmu.itb.walker.port", "mmu.dtb.walker.port"]
def connectWalkerPorts(self, iport, dport):
self.itb.walker.port = iport
self.dtb.walker.port = dport

View File

@@ -44,3 +44,11 @@ class X86MMU(BaseMMU):
cxx_header = 'arch/x86/mmu.hh'
itb = X86TLB()
dtb = X86TLB()
@classmethod
def walkerPorts(cls):
return ["mmu.itb.walker.port", "mmu.dtb.walker.port"]
def connectWalkerPorts(self, iport, dport):
self.itb.walker.port = iport
self.dtb.walker.port = dport

View File

@@ -59,27 +59,27 @@ from m5.objects.Platform import Platform
default_tracer = ExeTracer()
if buildEnv['TARGET_ISA'] == 'sparc':
from m5.objects.SparcTLB import SparcTLB as ArchDTB, SparcTLB as ArchITB
from m5.objects.SparcMMU import SparcMMU as ArchMMU
from m5.objects.SparcInterrupts import SparcInterrupts as ArchInterrupts
from m5.objects.SparcISA import SparcISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'x86':
from m5.objects.X86TLB import X86TLB as ArchDTB, X86TLB as ArchITB
from m5.objects.X86MMU import X86MMU as ArchMMU
from m5.objects.X86LocalApic import X86LocalApic as ArchInterrupts
from m5.objects.X86ISA import X86ISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'mips':
from m5.objects.MipsTLB import MipsTLB as ArchDTB, MipsTLB as ArchITB
from m5.objects.MipsMMU import MipsMMU as ArchMMU
from m5.objects.MipsInterrupts import MipsInterrupts as ArchInterrupts
from m5.objects.MipsISA import MipsISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'arm':
from m5.objects.ArmTLB import ArmDTB as ArchDTB, ArmITB as ArchITB
from m5.objects.ArmMMU import ArmMMU as ArchMMU
from m5.objects.ArmInterrupts import ArmInterrupts as ArchInterrupts
from m5.objects.ArmISA import ArmISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'power':
from m5.objects.PowerTLB import PowerTLB as ArchDTB, PowerTLB as ArchITB
from m5.objects.PowerMMU import PowerMMU as ArchMMU
from m5.objects.PowerInterrupts import PowerInterrupts as ArchInterrupts
from m5.objects.PowerISA import PowerISA as ArchISA
elif buildEnv['TARGET_ISA'] == 'riscv':
from m5.objects.RiscvTLB import RiscvTLB as ArchDTB, RiscvTLB as ArchITB
from m5.objects.RiscvMMU import RiscvMMU as ArchMMU
from m5.objects.RiscvInterrupts import RiscvInterrupts as ArchInterrupts
from m5.objects.RiscvISA import RiscvISA as ArchISA
else:
@@ -153,8 +153,7 @@ class BaseCPU(ClockedObject):
workload = VectorParam.Process([], "processes to run")
dtb = Param.BaseTLB(ArchDTB(), "Data TLB")
itb = Param.BaseTLB(ArchITB(), "Instruction TLB")
mmu = Param.BaseMMU(ArchMMU(), "CPU memory management unit")
if buildEnv['TARGET_ISA'] == 'power':
UnifiedTLB = Param.Bool(True, "Is this a Unified TLB?")
interrupts = VectorParam.BaseInterrupts([], "Interrupt Controller")
@@ -179,8 +178,7 @@ class BaseCPU(ClockedObject):
dcache_port = RequestPort("Data Port")
_cached_ports = ['icache_port', 'dcache_port']
if buildEnv['TARGET_ISA'] in ['x86', 'arm', 'riscv']:
_cached_ports += ["itb.walker.port", "dtb.walker.port"]
_cached_ports += ArchMMU.walkerPorts()
_uncached_interrupt_response_ports = []
_uncached_interrupt_request_ports = []
@@ -218,18 +216,18 @@ class BaseCPU(ClockedObject):
if iwc and dwc:
self.itb_walker_cache = iwc
self.dtb_walker_cache = dwc
self.itb.walker.port = iwc.cpu_side
self.dtb.walker.port = dwc.cpu_side
self.mmu.connectWalkerPorts(
iwc.cpu_side, dwc.cpu_side)
self._cached_ports += ["itb_walker_cache.mem_side", \
"dtb_walker_cache.mem_side"]
else:
self._cached_ports += ["itb.walker.port", "dtb.walker.port"]
self._cached_ports += ArchMMU.walkerPorts()
# Checker doesn't need its own tlb caches because it does
# functional accesses only
if self.checker != NULL:
self._cached_ports += ["checker.itb.walker.port", \
"checker.dtb.walker.port"]
self._cached_ports += [ ".".join("checker", port) \
for port in ArchMMU.walkerPorts() ]
def addTwoLevelCacheHierarchy(self, ic, dc, l2c, iwc=None, dwc=None,
xbar=None):

View File

@@ -235,7 +235,7 @@ BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
}
void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseMMU *mmu)
{
assert(tid < numThreads);
AddressMonitor &monitor = addressMonitor[tid];
@@ -256,7 +256,7 @@ BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
req->setVirt(addr, size, 0x0, dataRequestorId(), tc->instAddr());
// translate to physical address
Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
Fault fault = mmu->translateAtomic(req, tc, BaseTLB::Read);
assert(fault == NoFault);
monitor.pAddr = req->getPaddr() & mask;
@@ -588,41 +588,14 @@ BaseCPU::takeOverFrom(BaseCPU *oldCPU)
ThreadContext::compare(oldTC, newTC);
*/
Port *old_itb_port = oldTC->getITBPtr()->getTableWalkerPort();
Port *old_dtb_port = oldTC->getDTBPtr()->getTableWalkerPort();
Port *new_itb_port = newTC->getITBPtr()->getTableWalkerPort();
Port *new_dtb_port = newTC->getDTBPtr()->getTableWalkerPort();
// Move over any table walker ports if they exist
if (new_itb_port)
new_itb_port->takeOverFrom(old_itb_port);
if (new_dtb_port)
new_dtb_port->takeOverFrom(old_dtb_port);
newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());
newTC->getMMUPtr()->takeOverFrom(oldTC->getMMUPtr());
// Check whether or not we have to transfer CheckerCPU
// objects over in the switch
CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
if (oldChecker && newChecker) {
Port *old_checker_itb_port =
oldChecker->getITBPtr()->getTableWalkerPort();
Port *old_checker_dtb_port =
oldChecker->getDTBPtr()->getTableWalkerPort();
Port *new_checker_itb_port =
newChecker->getITBPtr()->getTableWalkerPort();
Port *new_checker_dtb_port =
newChecker->getDTBPtr()->getTableWalkerPort();
newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());
// Move over any table walker ports if they exist for checker
if (new_checker_itb_port)
new_checker_itb_port->takeOverFrom(old_checker_itb_port);
if (new_checker_dtb_port)
new_checker_dtb_port->takeOverFrom(old_checker_dtb_port);
CheckerCPU *old_checker = oldTC->getCheckerCpuPtr();
CheckerCPU *new_checker = newTC->getCheckerCpuPtr();
if (old_checker && new_checker) {
new_checker->getMMUPtr()->takeOverFrom(old_checker->getMMUPtr());
}
}
@@ -647,11 +620,9 @@ BaseCPU::flushTLBs()
ThreadContext &tc(*threadContexts[i]);
CheckerCPU *checker(tc.getCheckerCpuPtr());
tc.getITBPtr()->flushAll();
tc.getDTBPtr()->flushAll();
tc.getMMUPtr()->flushAll();
if (checker) {
checker->getITBPtr()->flushAll();
checker->getDTBPtr()->flushAll();
checker->getMMUPtr()->flushAll();
}
}
}

View File

@@ -615,7 +615,7 @@ class BaseCPU : public ClockedObject
public:
void armMonitor(ThreadID tid, Addr address);
bool mwait(ThreadID tid, PacketPtr pkt);
void mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb);
void mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseMMU *mmu);
AddressMonitor *getCpuAddrMonitor(ThreadID tid)
{
assert(tid < numThreads);

View File

@@ -1053,7 +1053,7 @@ class BaseDynInst : public ExecContext, public RefCounted
void
mwaitAtomic(ThreadContext *tc) override
{
return cpu->mwaitAtomic(threadNumber, tc, cpu->dtb);
return cpu->mwaitAtomic(threadNumber, tc, cpu->mmu);
}
AddressMonitor *
getAddrMonitor() override

View File

@@ -80,8 +80,7 @@ CheckerCPU::CheckerCPU(const Params &p)
exitOnError = p.exitOnError;
warnOnlyOnLoadError = p.warnOnlyOnLoadError;
itb = p.itb;
dtb = p.dtb;
mmu = p.mmu;
workload = p.workload;
updateOnError = true;
@@ -99,11 +98,11 @@ CheckerCPU::setSystem(System *system)
systemPtr = system;
if (FullSystem) {
thread = new SimpleThread(this, 0, systemPtr, itb, dtb, p.isa[0]);
thread = new SimpleThread(this, 0, systemPtr, mmu, p.isa[0]);
} else {
thread = new SimpleThread(this, 0, systemPtr,
workload.size() ? workload[0] : NULL,
itb, dtb, p.isa[0]);
mmu, p.isa[0]);
}
tc = thread->getTC();
@@ -188,7 +187,7 @@ CheckerCPU::readMem(Addr addr, uint8_t *data, unsigned size,
// translate to physical address
if (predicate) {
fault = dtb->translateFunctional(mem_req, tc, BaseTLB::Read);
fault = mmu->translateFunctional(mem_req, tc, BaseTLB::Read);
}
if (predicate && !checked_flags && fault == NoFault && unverifiedReq) {
@@ -272,7 +271,7 @@ CheckerCPU::writeMem(uint8_t *data, unsigned size,
predicate = (mem_req != nullptr);
if (predicate) {
fault = dtb->translateFunctional(mem_req, tc, BaseTLB::Write);
fault = mmu->translateFunctional(mem_req, tc, BaseTLB::Write);
}
if (predicate && !checked_flags && fault == NoFault && unverifiedReq) {

View File

@@ -132,8 +132,7 @@ class CheckerCPU : public BaseCPU, public ExecContext
ThreadContext *tc;
BaseTLB *itb;
BaseTLB *dtb;
BaseMMU *mmu;
// ISAs like ARM can have multiple destination registers to check,
// keep them all in a std::queue
@@ -153,8 +152,10 @@ class CheckerCPU : public BaseCPU, public ExecContext
// Primary thread being run.
SimpleThread *thread;
BaseTLB* getITBPtr() { return itb; }
BaseTLB* getDTBPtr() { return dtb; }
BaseTLB* getITBPtr() { return mmu->itb; }
BaseTLB* getDTBPtr() { return mmu->dtb; }
BaseMMU* getMMUPtr() { return mmu; }
virtual Counter totalInsts() const override
{
@@ -540,28 +541,32 @@ class CheckerCPU : public BaseCPU, public ExecContext
void
demapPage(Addr vaddr, uint64_t asn) override
{
this->itb->demapPage(vaddr, asn);
this->dtb->demapPage(vaddr, asn);
mmu->demapPage(vaddr, asn);
}
// monitor/mwait functions
void armMonitor(Addr address) override { BaseCPU::armMonitor(0, address); }
bool mwait(PacketPtr pkt) override { return BaseCPU::mwait(0, pkt); }
void mwaitAtomic(ThreadContext *tc) override
{ return BaseCPU::mwaitAtomic(0, tc, thread->dtb); }
void
mwaitAtomic(ThreadContext *tc) override
{
return BaseCPU::mwaitAtomic(0, tc, thread->mmu);
}
AddressMonitor *getAddrMonitor() override
{ return BaseCPU::getCpuAddrMonitor(0); }
void
demapInstPage(Addr vaddr, uint64_t asn)
{
this->itb->demapPage(vaddr, asn);
mmu->itb->demapPage(vaddr, asn);
}
void
demapDataPage(Addr vaddr, uint64_t asn)
{
this->dtb->demapPage(vaddr, asn);
mmu->dtb->demapPage(vaddr, asn);
}
/**

View File

@@ -244,7 +244,7 @@ Checker<Impl>::verify(const DynInstPtr &completed_inst)
Request::INST_FETCH, requestorId,
thread->instAddr());
fault = itb->translateFunctional(
fault = mmu->translateFunctional(
mem_req, tc, BaseTLB::Execute);
if (fault != NoFault) {

View File

@@ -131,6 +131,8 @@ class CheckerThreadContext : public ThreadContext
BaseTLB *getDTBPtr() override { return actualTC->getDTBPtr(); }
BaseMMU *getMMUPtr() override { return actualTC->getMMUPtr(); }
CheckerCPU *
getCheckerCpuPtr() override
{

View File

@@ -82,14 +82,13 @@ BaseKvmCPU::BaseKvmCPU(const BaseKvmCPUParams &params)
panic("KVM: Failed to determine host page size (%i)\n",
errno);
if (FullSystem) {
thread = new SimpleThread(this, 0, params.system, params.itb,
params.dtb, params.isa[0]);
} else {
if (FullSystem)
thread = new SimpleThread(this, 0, params.system, params.mmu,
params.isa[0]);
else
thread = new SimpleThread(this, /* thread_num */ 0, params.system,
params.workload[0], params.itb,
params.dtb, params.isa[0]);
}
params.workload[0], params.mmu,
params.isa[0]);
thread->setStatus(ThreadContext::Halted);
tc = thread->getTC();
@@ -1082,7 +1081,7 @@ BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
// APIC accesses on x86 and m5ops where supported through a MMIO
// interface.
BaseTLB::Mode tlb_mode(write ? BaseTLB::Write : BaseTLB::Read);
Fault fault(tc->getDTBPtr()->finalizePhysical(mmio_req, tc, tlb_mode));
Fault fault(tc->getMMUPtr()->finalizePhysical(mmio_req, tc, tlb_mode));
if (fault != NoFault)
warn("Finalization of MMIO address failed: %s\n", fault->name());

View File

@@ -55,11 +55,11 @@ MinorCPU::MinorCPU(const MinorCPUParams &params) :
for (ThreadID i = 0; i < numThreads; i++) {
if (FullSystem) {
thread = new Minor::MinorThread(this, i, params.system,
params.itb, params.dtb, params.isa[i]);
params.mmu, params.isa[i]);
thread->setStatus(ThreadContext::Halted);
} else {
thread = new Minor::MinorThread(this, i, params.system,
params.workload[i], params.itb, params.dtb,
params.workload[i], params.mmu,
params.isa[i]);
}

View File

@@ -432,8 +432,7 @@ class ExecContext : public ::ExecContext
void
demapPage(Addr vaddr, uint64_t asn) override
{
thread.getITBPtr()->demapPage(vaddr, asn);
thread.getDTBPtr()->demapPage(vaddr, asn);
thread.getMMUPtr()->demapPage(vaddr, asn);
}
RegVal
@@ -468,17 +467,29 @@ class ExecContext : public ::ExecContext
public:
// monitor/mwait functions
void armMonitor(Addr address) override
{ getCpuPtr()->armMonitor(inst->id.threadId, address); }
void
armMonitor(Addr address) override
{
getCpuPtr()->armMonitor(inst->id.threadId, address);
}
bool mwait(PacketPtr pkt) override
{ return getCpuPtr()->mwait(inst->id.threadId, pkt); }
bool
mwait(PacketPtr pkt) override
{
return getCpuPtr()->mwait(inst->id.threadId, pkt);
}
void mwaitAtomic(ThreadContext *tc) override
{ return getCpuPtr()->mwaitAtomic(inst->id.threadId, tc, thread.dtb); }
void
mwaitAtomic(ThreadContext *tc) override
{
return getCpuPtr()->mwaitAtomic(inst->id.threadId, tc, thread.mmu);
}
AddressMonitor *getAddrMonitor() override
{ return getCpuPtr()->getCpuAddrMonitor(inst->id.threadId); }
AddressMonitor *
getAddrMonitor() override
{
return getCpuPtr()->getCpuAddrMonitor(inst->id.threadId);
}
};
}

View File

@@ -184,7 +184,7 @@ Fetch1::fetchLine(ThreadID tid)
/* Submit the translation request. The response will come
* through finish/markDelayed on this request as it bears
* the Translation interface */
cpu.threads[request->id.threadId]->itb->translateTiming(
cpu.threads[request->id.threadId]->mmu->translateTiming(
request->request,
cpu.getContext(request->id.threadId),
request, BaseTLB::Execute);

View File

@@ -310,7 +310,7 @@ LSQ::SingleDataRequest::startAddrTranslation()
/* Submit the translation request. The response will come through
* finish/markDelayed on the LSQRequest as it bears the Translation
* interface */
thread->getDTBPtr()->translateTiming(
thread->getMMUPtr()->translateTiming(
request, thread, this, (isLoad ? BaseTLB::Read : BaseTLB::Write));
} else {
disableMemAccess();
@@ -708,7 +708,7 @@ LSQ::SplitDataRequest::sendNextFragmentToTranslation()
port.numAccessesInDTLB++;
numInTranslationFragments++;
thread->getDTBPtr()->translateTiming(
thread->getMMUPtr()->translateTiming(
fragmentRequests[fragment_index], thread, this, (isLoad ?
BaseTLB::Read : BaseTLB::Write));
}

View File

@@ -179,14 +179,15 @@ class DerivO3CPU(BaseCPU):
def addCheckerCpu(self):
if buildEnv['TARGET_ISA'] in ['arm']:
from m5.objects.ArmTLB import ArmDTB, ArmITB
from m5.objects.ArmTLB import ArmMMU
self.checker = O3Checker(workload=self.workload,
exitOnError=False,
updateOnError=True,
warnOnlyOnLoadError=True)
self.checker.itb = ArmITB(size = self.itb.size)
self.checker.dtb = ArmDTB(size = self.dtb.size)
self.checker.mmu = ArmMMU()
self.checker.mmu.itb.size = self.mmu.itb.size
self.checker.mmu.dtb.size = self.mmu.dtb.size
self.checker.cpu_id = self.cpu_id
else:

View File

@@ -81,8 +81,7 @@ BaseO3CPU::regStats()
template <class Impl>
FullO3CPU<Impl>::FullO3CPU(const DerivO3CPUParams &params)
: BaseO3CPU(params),
itb(params.itb),
dtb(params.dtb),
mmu(params.mmu),
tickEvent([this]{ tick(); }, "FullO3CPU tick",
false, Event::CPU_Tick_Pri),
threadExitEvent([this]{ exitThreads(); }, "FullO3CPU exit threads",

View File

@@ -119,8 +119,7 @@ class FullO3CPU : public BaseO3CPU
SwitchedOut
};
BaseTLB *itb;
BaseTLB *dtb;
BaseMMU *mmu;
using LSQRequest = typename LSQ<Impl>::LSQRequest;
/** Overall CPU status. */
@@ -192,20 +191,20 @@ class FullO3CPU : public BaseO3CPU
/** Register probe points. */
void regProbePoints() override;
void demapPage(Addr vaddr, uint64_t asn)
void
demapPage(Addr vaddr, uint64_t asn)
{
this->itb->demapPage(vaddr, asn);
this->dtb->demapPage(vaddr, asn);
mmu->demapPage(vaddr, asn);
}
void demapInstPage(Addr vaddr, uint64_t asn)
{
this->itb->demapPage(vaddr, asn);
mmu->itb->demapPage(vaddr, asn);
}
void demapDataPage(Addr vaddr, uint64_t asn)
{
this->dtb->demapPage(vaddr, asn);
mmu->dtb->demapPage(vaddr, asn);
}
/** Ticks CPU, calling tick() on each stage, and checking the overall

View File

@@ -604,7 +604,7 @@ DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
// Initiate translation of the icache block
fetchStatus[tid] = ItlbWait;
FetchTranslation *trans = new FetchTranslation(this);
cpu->itb->translateTiming(mem_req, cpu->thread[tid]->getTC(),
cpu->mmu->translateTiming(mem_req, cpu->thread[tid]->getTC(),
trans, BaseTLB::Execute);
return true;
}

View File

@@ -954,7 +954,7 @@ void
LSQ<Impl>::LSQRequest::sendFragmentToTranslation(int i)
{
numInTranslationFragments++;
_port.dTLB()->translateTiming(
_port.getMMUPtr()->translateTiming(
this->request(i),
this->_inst->thread->getTC(), this,
this->isLoad() ? BaseTLB::Read : BaseTLB::Write);

View File

@@ -401,7 +401,7 @@ class LSQUnit
/** Schedule event for the cpu. */
void schedule(Event& ev, Tick when) { cpu->schedule(ev, when); }
BaseTLB* dTLB() { return cpu->dtb; }
BaseMMU* getMMUPtr() { return cpu->mmu; }
private:
/** Pointer to the CPU. */

View File

@@ -99,10 +99,13 @@ class O3ThreadContext : public ThreadContext
O3ThreadState<Impl> *thread;
/** Returns a pointer to the ITB. */
BaseTLB *getITBPtr() override { return cpu->itb; }
BaseTLB *getITBPtr() override { return cpu->mmu->itb; }
/** Returns a pointer to the DTB. */
BaseTLB *getDTBPtr() override { return cpu->dtb; }
BaseTLB *getDTBPtr() override { return cpu->mmu->dtb; }
/** Returns a pointer to the MMU. */
BaseMMU *getMMUPtr() override { return cpu->mmu; }
CheckerCPU *getCheckerCpuPtr() override { return NULL; }

View File

@@ -40,11 +40,12 @@ class BaseSimpleCPU(BaseCPU):
def addCheckerCpu(self):
if buildEnv['TARGET_ISA'] in ['arm']:
from m5.objects.ArmTLB import ArmITB, ArmDTB
from m5.objects.ArmTLB import ArmMMU
self.checker = DummyChecker(workload = self.workload)
self.checker.itb = ArmITB(size = self.itb.size)
self.checker.dtb = ArmDTB(size = self.dtb.size)
self.checker.mmu = ArmMMU()
self.checker.mmu.itb.size = self.mmu.itb.size
self.checker.mmu.dtb.size = self.mmu.dtb.size
else:
print("ERROR: Checker only supported under ARM ISA!")
exit(1)

View File

@@ -389,7 +389,7 @@ AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
// translate to physical address
if (predicate) {
fault = thread->dtb->translateAtomic(req, thread->getTC(),
fault = thread->mmu->translateAtomic(req, thread->getTC(),
BaseTLB::Read);
}
@@ -480,7 +480,7 @@ AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
// translate to physical address
if (predicate)
fault = thread->dtb->translateAtomic(req, thread->getTC(),
fault = thread->mmu->translateAtomic(req, thread->getTC(),
BaseTLB::Write);
// Now do the access.
@@ -590,8 +590,8 @@ AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
thread->pcState().instAddr(), std::move(amo_op));
// translate to physical address
Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
BaseTLB::Write);
Fault fault = thread->mmu->translateAtomic(
req, thread->getTC(), BaseTLB::Write);
// Now do the access.
if (fault == NoFault && !req->getFlags().isSet(Request::NO_ACCESS)) {
@@ -667,7 +667,7 @@ AtomicSimpleCPU::tick()
if (needToFetch) {
ifetch_req->taskId(taskId());
setupFetchRequest(ifetch_req);
fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
fault = thread->mmu->translateAtomic(ifetch_req, thread->getTC(),
BaseTLB::Execute);
}

View File

@@ -92,11 +92,11 @@ BaseSimpleCPU::BaseSimpleCPU(const BaseSimpleCPUParams &p)
for (unsigned i = 0; i < numThreads; i++) {
if (FullSystem) {
thread = new SimpleThread(this, i, p.system,
p.itb, p.dtb, p.isa[i]);
thread = new SimpleThread(
this, i, p.system, p.mmu, p.isa[i]);
} else {
thread = new SimpleThread(this, i, p.system, p.workload[i],
p.itb, p.dtb, p.isa[i]);
thread = new SimpleThread(
this, i, p.system, p.workload[i], p.mmu, p.isa[i]);
}
threadInfo.push_back(new SimpleExecContext(this, thread));
ThreadContext *tc = thread->getTC();

View File

@@ -577,7 +577,7 @@ class SimpleExecContext : public ExecContext {
void
mwaitAtomic(ThreadContext *tc) override
{
cpu->mwaitAtomic(thread->threadId(), tc, thread->dtb);
cpu->mwaitAtomic(thread->threadId(), tc, thread->mmu);
}
AddressMonitor *

View File

@@ -488,14 +488,14 @@ TimingSimpleCPU::initiateMemRead(Addr addr, unsigned size,
DataTranslation<TimingSimpleCPU *> *trans2 =
new DataTranslation<TimingSimpleCPU *>(this, state, 1);
thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
thread->mmu->translateTiming(req1, thread->getTC(), trans1, mode);
thread->mmu->translateTiming(req2, thread->getTC(), trans2, mode);
} else {
WholeTranslationState *state =
new WholeTranslationState(req, new uint8_t[size], NULL, mode);
DataTranslation<TimingSimpleCPU *> *translation
= new DataTranslation<TimingSimpleCPU *>(this, state);
thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
}
return NoFault;
@@ -573,14 +573,14 @@ TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
DataTranslation<TimingSimpleCPU *> *trans2 =
new DataTranslation<TimingSimpleCPU *>(this, state, 1);
thread->dtb->translateTiming(req1, thread->getTC(), trans1, mode);
thread->dtb->translateTiming(req2, thread->getTC(), trans2, mode);
thread->mmu->translateTiming(req1, thread->getTC(), trans1, mode);
thread->mmu->translateTiming(req2, thread->getTC(), trans2, mode);
} else {
WholeTranslationState *state =
new WholeTranslationState(req, newData, res, mode);
DataTranslation<TimingSimpleCPU *> *translation =
new DataTranslation<TimingSimpleCPU *>(this, state);
thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
}
// Translation faults will be returned via finishTranslation()
@@ -630,7 +630,7 @@ TimingSimpleCPU::initiateMemAMO(Addr addr, unsigned size,
new WholeTranslationState(req, new uint8_t[size], NULL, mode);
DataTranslation<TimingSimpleCPU *> *translation
= new DataTranslation<TimingSimpleCPU *>(this, state);
thread->dtb->translateTiming(req, thread->getTC(), translation, mode);
thread->mmu->translateTiming(req, thread->getTC(), translation, mode);
return NoFault;
}
@@ -706,7 +706,7 @@ TimingSimpleCPU::fetch()
ifetch_req->setContext(thread->contextId());
setupFetchRequest(ifetch_req);
DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
thread->itb->translateTiming(ifetch_req, thread->getTC(),
thread->mmu->translateTiming(ifetch_req, thread->getTC(),
&fetchTranslation, BaseTLB::Execute);
} else {
_status = IcacheWaitResponse;

View File

@@ -66,13 +66,13 @@ using namespace std;
// constructor
SimpleThread::SimpleThread(BaseCPU *_cpu, int _thread_num, System *_sys,
Process *_process, BaseTLB *_itb,
BaseTLB *_dtb, BaseISA *_isa)
Process *_process, BaseMMU *_mmu,
BaseISA *_isa)
: ThreadState(_cpu, _thread_num, _process),
isa(dynamic_cast<TheISA::ISA *>(_isa)),
predicate(true), memAccPredicate(true),
comInstEventQueue("instruction-based event queue"),
system(_sys), itb(_itb), dtb(_dtb), decoder(isa),
system(_sys), mmu(_mmu), decoder(isa),
htmTransactionStarts(0), htmTransactionStops(0)
{
assert(isa);
@@ -80,12 +80,12 @@ SimpleThread::SimpleThread(BaseCPU *_cpu, int _thread_num, System *_sys,
}
SimpleThread::SimpleThread(BaseCPU *_cpu, int _thread_num, System *_sys,
BaseTLB *_itb, BaseTLB *_dtb, BaseISA *_isa)
BaseMMU *_mmu, BaseISA *_isa)
: ThreadState(_cpu, _thread_num, NULL),
isa(dynamic_cast<TheISA::ISA *>(_isa)),
predicate(true), memAccPredicate(true),
comInstEventQueue("instruction-based event queue"),
system(_sys), itb(_itb), dtb(_dtb), decoder(isa),
system(_sys), mmu(_mmu), decoder(isa),
htmTransactionStarts(0), htmTransactionStops(0)
{
assert(isa);

View File

@@ -46,6 +46,7 @@
#include "arch/decoder.hh"
#include "arch/generic/htm.hh"
#include "arch/generic/mmu.hh"
#include "arch/generic/tlb.hh"
#include "arch/isa.hh"
#include "arch/registers.hh"
@@ -130,8 +131,7 @@ class SimpleThread : public ThreadState, public ThreadContext
System *system;
BaseTLB *itb;
BaseTLB *dtb;
BaseMMU *mmu;
TheISA::Decoder decoder;
@@ -142,10 +142,10 @@ class SimpleThread : public ThreadState, public ThreadContext
// constructor: initialize SimpleThread from given process structure
// FS
SimpleThread(BaseCPU *_cpu, int _thread_num, System *_system,
BaseTLB *_itb, BaseTLB *_dtb, BaseISA *_isa);
BaseMMU *_mmu, BaseISA *_isa);
// SE
SimpleThread(BaseCPU *_cpu, int _thread_num, System *_system,
Process *_process, BaseTLB *_itb, BaseTLB *_dtb,
Process *_process, BaseMMU *_mmu,
BaseISA *_isa);
virtual ~SimpleThread() {}
@@ -168,20 +168,20 @@ class SimpleThread : public ThreadState, public ThreadContext
*/
ThreadContext *getTC() { return this; }
void demapPage(Addr vaddr, uint64_t asn)
void
demapPage(Addr vaddr, uint64_t asn)
{
itb->demapPage(vaddr, asn);
dtb->demapPage(vaddr, asn);
mmu->demapPage(vaddr, asn);
}
void demapInstPage(Addr vaddr, uint64_t asn)
{
itb->demapPage(vaddr, asn);
mmu->itb->demapPage(vaddr, asn);
}
void demapDataPage(Addr vaddr, uint64_t asn)
{
dtb->demapPage(vaddr, asn);
mmu->dtb->demapPage(vaddr, asn);
}
/*******************************************
@@ -216,9 +216,11 @@ class SimpleThread : public ThreadState, public ThreadContext
ContextID contextId() const override { return ThreadState::contextId(); }
void setContextId(ContextID id) override { ThreadState::setContextId(id); }
BaseTLB *getITBPtr() override { return itb; }
BaseTLB *getITBPtr() override { return mmu->itb; }
BaseTLB *getDTBPtr() override { return dtb; }
BaseTLB *getDTBPtr() override { return mmu->dtb; }
BaseMMU *getMMUPtr() override { return mmu; }
CheckerCPU *getCheckerCpuPtr() override { return NULL; }

View File

@@ -61,6 +61,7 @@ namespace TheISA
class Decoder;
}
class BaseCPU;
class BaseMMU;
class BaseTLB;
class CheckerCPU;
class Checkpoint;
@@ -133,6 +134,8 @@ class ThreadContext : public PCEventScope
virtual BaseTLB *getDTBPtr() = 0;
virtual BaseMMU *getMMUPtr() = 0;
virtual CheckerCPU *getCheckerCpuPtr() = 0;
virtual BaseISA *getIsaPtr() = 0;

View File

@@ -80,8 +80,8 @@ for (i, cpu) in enumerate(system.cpu):
# Tie the cpu ports to the correct ruby system ports
cpu.icache_port = system.ruby._cpu_ports[i].slave
cpu.dcache_port = system.ruby._cpu_ports[i].slave
cpu.itb.walker.port = system.ruby._cpu_ports[i].slave
cpu.dtb.walker.port = system.ruby._cpu_ports[i].slave
cpu.mmu.connectWalkerPorts(
system.ruby._cpu_ports[i].slave, system.ruby._cpu_ports[i].slave)
cpu.interrupts[0].pio = system.ruby._cpu_ports[i].master
cpu.interrupts[0].int_master = system.ruby._cpu_ports[i].slave

View File

@@ -113,8 +113,8 @@ class MMUCache(Cache):
"""
self.mmubus = L2XBar()
self.cpu_side = self.mmubus.master
for tlb in [cpu.itb, cpu.dtb]:
self.mmubus.slave = tlb.walker.port
cpu.mmu.connectWalkerPorts(
self.mmubus.slave, self.mmubus.slave)
def connectBus(self, bus):
"""Connect this cache to a memory-side bus"""