arch-arm: Rewrite the ArmTLB storage to use an AssociativeCache (#1661)

With this PR we replace the TlbEntry storage within the TLB from an
array of entries with a custom hardcoded FA indexing policy and LRU
replacement policy, into the flexible SetAssociative cache.
This commit is contained in:
Giacomo Travaglini
2024-11-02 10:18:44 +00:00
committed by GitHub
20 changed files with 614 additions and 458 deletions

View File

@@ -72,7 +72,9 @@ class ArmMMU(BaseMMU):
cxx_header = "arch/arm/mmu.hh"
# L2 TLBs
l2_shared = ArmTLB(entry_type="unified", size=1280, partial_levels=["L2"])
l2_shared = ArmTLB(
entry_type="unified", size=1280, assoc=5, partial_levels=["L2"]
)
# L1 TLBs
itb = ArmTLB(entry_type="instruction", next_level=Parent.l2_shared)

View File

@@ -1,6 +1,6 @@
# -*- mode:python -*-
# Copyright (c) 2009, 2013, 2015, 2021 Arm Limited
# Copyright (c) 2009, 2013, 2015, 2021, 2024 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -36,6 +36,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.objects.BaseTLB import BaseTLB
from m5.objects.ReplacementPolicies import LRURP
from m5.params import *
from m5.proxy import *
from m5.SimObject import SimObject
@@ -45,12 +46,42 @@ class ArmLookupLevel(Enum):
vals = ["L0", "L1", "L2", "L3"]
class TLBIndexingPolicy(SimObject):
type = "TLBIndexingPolicy"
abstract = True
cxx_class = "gem5::IndexingPolicyTemplate<gem5::ArmISA::TLBTypes>"
cxx_header = "arch/arm/pagetable.hh"
cxx_template_params = ["class Types"]
# Get the size from the parent (cache)
num_entries = Param.Int(Parent.size, "number of TLB entries")
# Get the associativity
assoc = Param.Int(Parent.assoc, "associativity")
class TLBSetAssociative(TLBIndexingPolicy):
type = "TLBSetAssociative"
cxx_class = "gem5::ArmISA::TLBSetAssociative"
cxx_header = "arch/arm/pagetable.hh"
class ArmTLB(BaseTLB):
type = "ArmTLB"
cxx_class = "gem5::ArmISA::TLB"
cxx_header = "arch/arm/tlb.hh"
sys = Param.System(Parent.any, "system object parameter")
size = Param.Int(64, "TLB size")
assoc = Param.Int(
Self.size, "Associativity of the TLB. Fully Associative by default"
)
indexing_policy = Param.TLBIndexingPolicy(
TLBSetAssociative(assoc=Parent.assoc, num_entries=Parent.size),
"Indexing policy of the TLB",
)
replacement_policy = Param.BaseReplacementPolicy(
LRURP(), "Replacement policy of the TLB"
)
is_stage2 = Param.Bool(False, "Is this a stage 2 TLB?")
partial_levels = VectorParam.ArmLookupLevel(

View File

@@ -38,104 +38,127 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Import('*')
Import("*")
Source('insts/fplib.cc')
Source("insts/fplib.cc")
if env['CONF']['USE_ARM_ISA']:
env.TagImplies('arm isa', 'gem5 lib')
if env["CONF"]["USE_ARM_ISA"]:
env.TagImplies("arm isa", "gem5 lib")
# The GTest function does not have a 'tags' parameter. We therefore apply this
# guard to ensure this test is only built when ARM is compiled.
#
# Note: This will need reconfigured for multi-isa. E.g., if this is
# incorporated: https://gem5-review.googlesource.com/c/public/gem5/+/52491
if env['CONF']['USE_ARM_ISA']:
GTest('aapcs64.test', 'aapcs64.test.cc',
'../../base/debug.cc',
'../../cpu/reg_class.cc',
'../../sim/bufval.cc', '../../sim/cur_tick.cc',
'regs/int.cc')
GTest('matrix.test', 'matrix.test.cc')
Source('decoder.cc', tags='arm isa')
Source('faults.cc', tags='arm isa')
Source('htm.cc', tags='arm isa')
Source('insts/branch.cc', tags='arm isa')
Source('insts/branch64.cc', tags='arm isa')
Source('insts/data64.cc', tags='arm isa')
Source('insts/macromem.cc', tags='arm isa')
Source('insts/mem.cc', tags='arm isa')
Source('insts/mem64.cc', tags='arm isa')
Source('insts/misc.cc', tags='arm isa')
Source('insts/misc64.cc', tags='arm isa')
Source('insts/pred_inst.cc', tags='arm isa')
Source('insts/pseudo.cc', tags='arm isa')
Source('insts/sme.cc', tags='arm isa')
Source('insts/static_inst.cc', tags='arm isa')
Source('insts/sve.cc', tags='arm isa')
Source('insts/sve_mem.cc', tags='arm isa')
Source('insts/vfp.cc', tags='arm isa')
Source('insts/crypto.cc', tags='arm isa')
Source('insts/tme64.cc', tags='arm isa')
if env['CONF']['PROTOCOL'] == 'MESI_Three_Level_HTM':
Source('insts/tme64ruby.cc', tags='arm isa')
if env["CONF"]["USE_ARM_ISA"]:
GTest(
"aapcs64.test",
"aapcs64.test.cc",
"../../base/debug.cc",
"../../cpu/reg_class.cc",
"../../sim/bufval.cc",
"../../sim/cur_tick.cc",
"regs/int.cc",
)
GTest("matrix.test", "matrix.test.cc")
Source("decoder.cc", tags="arm isa")
Source("faults.cc", tags="arm isa")
Source("htm.cc", tags="arm isa")
Source("insts/branch.cc", tags="arm isa")
Source("insts/branch64.cc", tags="arm isa")
Source("insts/data64.cc", tags="arm isa")
Source("insts/macromem.cc", tags="arm isa")
Source("insts/mem.cc", tags="arm isa")
Source("insts/mem64.cc", tags="arm isa")
Source("insts/misc.cc", tags="arm isa")
Source("insts/misc64.cc", tags="arm isa")
Source("insts/pred_inst.cc", tags="arm isa")
Source("insts/pseudo.cc", tags="arm isa")
Source("insts/sme.cc", tags="arm isa")
Source("insts/static_inst.cc", tags="arm isa")
Source("insts/sve.cc", tags="arm isa")
Source("insts/sve_mem.cc", tags="arm isa")
Source("insts/vfp.cc", tags="arm isa")
Source("insts/crypto.cc", tags="arm isa")
Source("insts/tme64.cc", tags="arm isa")
if env["CONF"]["PROTOCOL"] == "MESI_Three_Level_HTM":
Source("insts/tme64ruby.cc", tags="arm isa")
else:
Source('insts/tme64classic.cc', tags='arm isa')
Source('interrupts.cc', tags='arm isa')
Source('isa.cc', tags='arm isa')
Source('isa_device.cc', tags='arm isa')
Source('linux/process.cc', tags='arm isa')
Source('linux/se_workload.cc', tags='arm isa')
Source('linux/fs_workload.cc', tags='arm isa')
Source('freebsd/fs_workload.cc', tags='arm isa')
Source('freebsd/se_workload.cc', tags='arm isa')
Source('fs_workload.cc', tags='arm isa')
Source('regs/int.cc', tags='arm isa')
Source('regs/misc.cc', tags='arm isa')
Source('mmu.cc', tags='arm isa')
Source('mpam.cc', tags='arm isa')
Source('nativetrace.cc', tags='arm isa')
Source('pagetable.cc', tags='arm isa')
Source('pauth_helpers.cc', tags='arm isa')
Source('pmu.cc', tags='arm isa')
Source('process.cc', tags='arm isa')
Source('qarma.cc', tags='arm isa')
Source('remote_gdb.cc', tags='arm isa')
Source('reg_abi.cc', tags='arm isa')
Source('semihosting.cc', tags='arm isa')
Source('system.cc', tags='arm isa')
Source('table_walker.cc', tags='arm isa')
Source('self_debug.cc', tags='arm isa')
Source('stage2_lookup.cc', tags='arm isa')
Source('tlb.cc', tags='arm isa')
Source('tlbi_op.cc', tags='arm isa')
Source('utility.cc', tags='arm isa')
Source("insts/tme64classic.cc", tags="arm isa")
Source("interrupts.cc", tags="arm isa")
Source("isa.cc", tags="arm isa")
Source("isa_device.cc", tags="arm isa")
Source("linux/process.cc", tags="arm isa")
Source("linux/se_workload.cc", tags="arm isa")
Source("linux/fs_workload.cc", tags="arm isa")
Source("freebsd/fs_workload.cc", tags="arm isa")
Source("freebsd/se_workload.cc", tags="arm isa")
Source("fs_workload.cc", tags="arm isa")
Source("regs/int.cc", tags="arm isa")
Source("regs/misc.cc", tags="arm isa")
Source("mmu.cc", tags="arm isa")
Source("mpam.cc", tags="arm isa")
Source("nativetrace.cc", tags="arm isa")
Source("pagetable.cc", tags="arm isa")
Source("pauth_helpers.cc", tags="arm isa")
Source("pmu.cc", tags="arm isa")
Source("process.cc", tags="arm isa")
Source("qarma.cc", tags="arm isa")
Source("remote_gdb.cc", tags="arm isa")
Source("reg_abi.cc", tags="arm isa")
Source("semihosting.cc", tags="arm isa")
Source("system.cc", tags="arm isa")
Source("table_walker.cc", tags="arm isa")
Source("self_debug.cc", tags="arm isa")
Source("stage2_lookup.cc", tags="arm isa")
Source("tlb.cc", tags="arm isa")
Source("tlbi_op.cc", tags="arm isa")
Source("utility.cc", tags="arm isa")
SimObject('ArmDecoder.py', sim_objects=['ArmDecoder'], tags='arm isa')
SimObject('ArmFsWorkload.py', sim_objects=[
'ArmFsWorkload', 'ArmFsLinux', 'ArmFsFreebsd'],
enums=['ArmMachineType'], tags='arm isa')
SimObject('ArmInterrupts.py', sim_objects=['ArmInterrupts'], tags='arm isa')
SimObject('ArmISA.py', sim_objects=['ArmISA'], enums=['DecoderFlavor'],
tags='arm isa')
SimObject('ArmMMU.py', sim_objects=['ArmTableWalker', 'ArmMMU'],
tags='arm isa')
SimObject('ArmNativeTrace.py', sim_objects=['ArmNativeTrace'], tags='arm isa')
SimObject('ArmSemihosting.py', sim_objects=['ArmSemihosting'], tags='arm isa')
SimObject('ArmSeWorkload.py', sim_objects=[
'ArmSEWorkload', 'ArmEmuLinux', 'ArmEmuFreebsd'], tags='arm isa')
SimObject('ArmSystem.py', sim_objects=['ArmSystem', 'ArmRelease'],
enums=['ArmExtension'], tags='arm isa')
SimObject('ArmTLB.py', sim_objects=['ArmTLB'], enums=['ArmLookupLevel'],
tags='arm isa')
SimObject('ArmPMU.py', sim_objects=['ArmPMU'], tags='arm isa')
SimObject("ArmDecoder.py", sim_objects=["ArmDecoder"], tags="arm isa")
SimObject(
"ArmFsWorkload.py",
sim_objects=["ArmFsWorkload", "ArmFsLinux", "ArmFsFreebsd"],
enums=["ArmMachineType"],
tags="arm isa",
)
SimObject("ArmInterrupts.py", sim_objects=["ArmInterrupts"], tags="arm isa")
SimObject(
"ArmISA.py",
sim_objects=["ArmISA"],
enums=["DecoderFlavor"],
tags="arm isa",
)
SimObject(
"ArmMMU.py", sim_objects=["ArmTableWalker", "ArmMMU"], tags="arm isa"
)
SimObject("ArmNativeTrace.py", sim_objects=["ArmNativeTrace"], tags="arm isa")
SimObject("ArmSemihosting.py", sim_objects=["ArmSemihosting"], tags="arm isa")
SimObject(
"ArmSeWorkload.py",
sim_objects=["ArmSEWorkload", "ArmEmuLinux", "ArmEmuFreebsd"],
tags="arm isa",
)
SimObject(
"ArmSystem.py",
sim_objects=["ArmSystem", "ArmRelease"],
enums=["ArmExtension"],
tags="arm isa",
)
SimObject(
"ArmTLB.py",
sim_objects=["ArmTLB", "TLBIndexingPolicy", "TLBSetAssociative"],
enums=["ArmLookupLevel"],
tags="arm isa",
)
SimObject("ArmPMU.py", sim_objects=["ArmPMU"], tags="arm isa")
SimObject('ArmCPU.py', sim_objects=[], tags='arm isa')
SimObject("ArmCPU.py", sim_objects=[], tags="arm isa")
DebugFlag('Arm', tags='arm isa')
DebugFlag('ArmTme', 'Transactional Memory Extension', tags='arm isa')
DebugFlag('MPAM', 'MPAM debug flag', tags='arm isa')
DebugFlag('PMUVerbose', "Performance Monitor", tags='arm isa')
DebugFlag("Arm", tags="arm isa")
DebugFlag("ArmTme", "Transactional Memory Extension", tags="arm isa")
DebugFlag("MPAM", "MPAM debug flag", tags="arm isa")
DebugFlag("PMUVerbose", "Performance Monitor", tags="arm isa")
# Add files generated by the ISA description.
ISADesc('isa/main.isa', decoder_splits=3, exec_splits=6, tags='arm isa')
ISADesc("isa/main.isa", decoder_splits=3, exec_splits=6, tags="arm isa")

View File

@@ -1067,8 +1067,8 @@ AbortFault<T>::invoke(ThreadContext *tc, const StaticInstPtr &inst)
tc->setMiscReg(T::FsrIndex, fsr);
tc->setMiscReg(T::FarIndex, faultAddr);
}
DPRINTF(Faults, "Abort Fault source=%#x fsr=%#x faultAddr=%#x "\
"tranMethod=%#x\n", source, fsr, faultAddr, tranMethod);
DPRINTF(Faults, "Abort Fault source=%#x fsr=%#x faultAddr=%#x\n",
source, fsr, faultAddr);
} else { // AArch64
// Set the FAR register. Nothing else to do if we are in AArch64 state
// because the syndrome register has already been set inside invoke64()
@@ -1092,11 +1092,11 @@ template<class T>
void
AbortFault<T>::update(ThreadContext *tc)
{
if (tranMethod == ArmFault::UnknownTran) {
tranMethod = longDescFormatInUse(tc) ? ArmFault::LpaeTran
: ArmFault::VmsaTran;
if (tranMethod == TranMethod::UnknownTran) {
tranMethod = longDescFormatInUse(tc) ? TranMethod::LpaeTran
: TranMethod::VmsaTran;
if ((tranMethod == ArmFault::VmsaTran) && this->routeToMonitor(tc)) {
if ((tranMethod == TranMethod::VmsaTran) && this->routeToMonitor(tc)) {
// See ARM ARM B3-1416
bool override_LPAE = false;
TTBCR ttbcr_s = tc->readMiscReg(MISCREG_TTBCR_S);
@@ -1109,7 +1109,7 @@ AbortFault<T>::update(ThreadContext *tc)
"override detected.\n");
}
if (override_LPAE)
tranMethod = ArmFault::LpaeTran;
tranMethod = TranMethod::LpaeTran;
}
}
@@ -1139,8 +1139,8 @@ AbortFault<T>::getFaultStatusCode(ThreadContext *tc) const
if (!this->to64) {
// AArch32
assert(tranMethod != ArmFault::UnknownTran);
if (tranMethod == ArmFault::LpaeTran) {
assert(tranMethod != TranMethod::UnknownTran);
if (tranMethod == TranMethod::LpaeTran) {
fsc = ArmFault::longDescFaultSources[source];
} else {
fsc = ArmFault::shortDescFaultSources[source];
@@ -1162,8 +1162,8 @@ AbortFault<T>::getFsr(ThreadContext *tc) const
auto fsc = getFaultStatusCode(tc);
// AArch32
assert(tranMethod != ArmFault::UnknownTran);
if (tranMethod == ArmFault::LpaeTran) {
assert(tranMethod != TranMethod::UnknownTran);
if (tranMethod == TranMethod::LpaeTran) {
fsr.status = fsc;
fsr.lpae = 1;
} else {

View File

@@ -149,13 +149,6 @@ class ArmFault : public FaultBase
AR // DataAbort: Acquire/Release semantics
};
enum TranMethod
{
LpaeTran,
VmsaTran,
UnknownTran
};
enum DebugType
{
NODEBUG = 0,
@@ -482,18 +475,18 @@ class AbortFault : public ArmFaultVals<T>
*/
Addr OVAddr;
bool write;
TlbEntry::DomainType domain;
DomainType domain;
uint8_t source;
uint8_t srcEncoded;
bool stage2;
bool s1ptw;
ArmFault::TranMethod tranMethod;
TranMethod tranMethod;
ArmFault::DebugType debugType;
public:
AbortFault(Addr _faultAddr, bool _write, TlbEntry::DomainType _domain,
AbortFault(Addr _faultAddr, bool _write, DomainType _domain,
uint8_t _source, bool _stage2,
ArmFault::TranMethod _tranMethod = ArmFault::UnknownTran,
TranMethod _tranMethod = TranMethod::UnknownTran,
ArmFault::DebugType _debug = ArmFault::NODEBUG) :
faultAddr(_faultAddr), OVAddr(0), write(_write),
domain(_domain), source(_source), srcEncoded(0),
@@ -525,10 +518,10 @@ class PrefetchAbort : public AbortFault<PrefetchAbort>
static const MiscRegIndex HFarIndex = MISCREG_HIFAR;
PrefetchAbort(Addr _addr, uint8_t _source, bool _stage2 = false,
ArmFault::TranMethod _tranMethod = ArmFault::UnknownTran,
TranMethod _tran_method = TranMethod::UnknownTran,
ArmFault::DebugType _debug = ArmFault::NODEBUG) :
AbortFault<PrefetchAbort>(_addr, false, TlbEntry::DomainType::NoAccess,
_source, _stage2, _tranMethod, _debug)
AbortFault<PrefetchAbort>(_addr, false, DomainType::NoAccess,
_source, _stage2, _tran_method, _debug)
{}
// @todo: external aborts should be routed if SCR.EA == 1
@@ -558,12 +551,12 @@ class DataAbort : public AbortFault<DataAbort>
bool sf;
bool ar;
DataAbort(Addr _addr, TlbEntry::DomainType _domain, bool _write, uint8_t _source,
DataAbort(Addr _addr, DomainType _domain, bool _write, uint8_t _source,
bool _stage2=false,
ArmFault::TranMethod _tranMethod=ArmFault::UnknownTran,
TranMethod _tran_method=TranMethod::UnknownTran,
ArmFault::DebugType _debug_type=ArmFault::NODEBUG) :
AbortFault<DataAbort>(_addr, _write, _domain, _source, _stage2,
_tranMethod, _debug_type),
_tran_method, _debug_type),
isv(false), sas (0), sse(0), srt(0), cm(0), sf(false), ar(false)
{}
@@ -586,7 +579,7 @@ class VirtualDataAbort : public AbortFault<VirtualDataAbort>
static const MiscRegIndex FarIndex = MISCREG_DFAR;
static const MiscRegIndex HFarIndex = MISCREG_HDFAR;
VirtualDataAbort(Addr _addr, TlbEntry::DomainType _domain, bool _write,
VirtualDataAbort(Addr _addr, DomainType _domain, bool _write,
uint8_t _source) :
AbortFault<VirtualDataAbort>(_addr, _write, _domain, _source, false)
{}

View File

@@ -646,7 +646,7 @@ ArmStaticInst::softwareBreakpoint32(ExecContext *xc, uint16_t imm) const
return std::make_shared<PrefetchAbort>(readPC(xc),
ArmFault::DebugEvent,
false,
ArmFault::UnknownTran,
TranMethod::UnknownTran,
ArmFault::BRKPOINT);
}
}

View File

@@ -235,7 +235,7 @@ class Interrupts : public BaseInterrupts
return std::make_shared<SystemError>();
if (hcr.va && takeVirtualInt(INT_VIRT_ABT))
return std::make_shared<VirtualDataAbort>(
0, TlbEntry::DomainType::NoAccess, false,
0, DomainType::NoAccess, false,
ArmFault::AsynchronousExternalAbort);
if (interrupts[INT_RST])
return std::make_shared<Reset>();

View File

@@ -45,9 +45,9 @@
#include "arch/arm/reg_abi.hh"
#include "arch/arm/stage2_lookup.hh"
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/tlbi_op.hh"
#include "debug/TLB.hh"
#include "debug/TLBVerbose.hh"
#include "debug/MMU.hh"
#include "mem/packet_access.hh"
#include "sim/pseudo_inst.hh"
#include "sim/process.hh"
@@ -134,6 +134,18 @@ MMU::drainResume()
s2State.miscRegValid = false;
}
ArmISA::TLB *
MMU::getDTBPtr() const
{
return static_cast<ArmISA::TLB *>(dtb);
}
ArmISA::TLB *
MMU::getITBPtr() const
{
return static_cast<ArmISA::TLB *>(itb);
}
TLB *
MMU::getTlb(BaseMMU::Mode mode, bool stage2) const
{
@@ -173,7 +185,7 @@ MMU::translateFunctional(ThreadContext *tc, Addr va, Addr &pa)
auto tlb = getTlb(BaseMMU::Read, state.directToStage2);
TlbEntry::Lookup lookup_data;
TlbEntry::KeyType lookup_data;
lookup_data.va = va;
lookup_data.asn = state.asid;
@@ -200,6 +212,70 @@ MMU::invalidateMiscReg()
s2State.computeAddrTop.flush();
}
void
MMU::flush(const TLBIOp &tlbi_op)
{
if (tlbi_op.stage1Flush()) {
flushStage1(tlbi_op);
}
if (tlbi_op.stage2Flush()) {
flushStage2(tlbi_op);
}
}
void
MMU::flushStage1(const TLBIOp &tlbi_op)
{
for (auto tlb : instruction) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
for (auto tlb : data) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
for (auto tlb : unified) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
}
void
MMU::flushStage2(const TLBIOp &tlbi_op)
{
itbStage2->flush(tlbi_op);
dtbStage2->flush(tlbi_op);
}
void
MMU::iflush(const TLBIOp &tlbi_op)
{
for (auto tlb : instruction) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
for (auto tlb : unified) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
}
void
MMU::dflush(const TLBIOp &tlbi_op)
{
for (auto tlb : data) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
for (auto tlb : unified) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
}
void
MMU::flushAll()
{
BaseMMU::flushAll();
itbStage2->flushAll();
dtbStage2->flushAll();
}
Fault
MMU::testAndFinalize(const RequestPtr &req,
ThreadContext *tc, Mode mode,
@@ -207,7 +283,7 @@ MMU::testAndFinalize(const RequestPtr &req,
{
// If we don't have a valid tlb entry it means virtual memory
// is not enabled
auto domain = te ? te-> domain : TlbEntry::DomainType::NoAccess;
auto domain = te ? te-> domain : DomainType::NoAccess;
mpam::tagRequest(tc, req, mode == Execute);
@@ -277,9 +353,9 @@ MMU::translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
// LPAE is always disabled in SE mode
return std::make_shared<DataAbort>(
vaddr_tainted,
TlbEntry::DomainType::NoAccess, is_write,
DomainType::NoAccess, is_write,
ArmFault::AlignmentFault, state.isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
}
}
}
@@ -321,8 +397,8 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
bool is_priv = state.isPriv && !(flags & UserMode);
// Get the translation type from the actuall table entry
ArmFault::TranMethod tranMethod = te->longDescFormat ? ArmFault::LpaeTran
: ArmFault::VmsaTran;
TranMethod tran_method = te->longDescFormat ?
TranMethod::LpaeTran : TranMethod::VmsaTran;
// If this is the second stage of translation and the request is for a
// stage 1 page table walk then we need to check the HCR.PTW bit. This
@@ -333,7 +409,7 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
return std::make_shared<DataAbort>(
vaddr, te->domain, is_write,
ArmFault::PermissionLL + te->lookupLevel,
state.isStage2, tranMethod);
state.isStage2, tran_method);
}
// Generate an alignment fault for unaligned data accesses to device or
@@ -343,9 +419,9 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
if (vaddr & mask(flags & AlignmentMask)) {
stats.alignFaults++;
return std::make_shared<DataAbort>(
vaddr, TlbEntry::DomainType::NoAccess, is_write,
vaddr, DomainType::NoAccess, is_write,
ArmFault::AlignmentFault, state.isStage2,
tranMethod);
tran_method);
}
}
}
@@ -357,7 +433,7 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
// desc. format in all cases
return std::make_shared<PrefetchAbort>(
vaddr, ArmFault::PrefetchUncacheable,
state.isStage2, tranMethod);
state.isStage2, tran_method);
}
}
@@ -365,7 +441,7 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
switch ((state.dacr >> (static_cast<uint8_t>(te->domain) * 2)) & 0x3) {
case 0:
stats.domainFaults++;
DPRINTF(TLB, "TLB Fault: Data abort on domain. DACR: %#x"
DPRINTF(MMU, "MMU Fault: Data abort on domain. DACR: %#x"
" domain: %#x write:%d\n", state.dacr,
static_cast<uint8_t>(te->domain), is_write);
if (is_fetch) {
@@ -375,12 +451,12 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
return std::make_shared<PrefetchAbort>(
req->getPC(),
ArmFault::DomainLL + te->lookupLevel,
state.isStage2, tranMethod);
state.isStage2, tran_method);
} else
return std::make_shared<DataAbort>(
vaddr, te->domain, is_write,
ArmFault::DomainLL + te->lookupLevel,
state.isStage2, tranMethod);
state.isStage2, tran_method);
case 1:
// Continue with permissions check
break;
@@ -408,7 +484,7 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
} else {
switch (ap) {
case 0:
DPRINTF(TLB, "Access permissions 0, checking rs:%#x\n",
DPRINTF(MMU, "Access permissions 0, checking rs:%#x\n",
(int)state.sctlr.rs);
if (!state.sctlr.xp) {
switch ((int)state.sctlr.rs) {
@@ -462,7 +538,7 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
(state.securityState == SecurityState::Secure &&
te->ns && state.scr.sif))) {
stats.permsFaults++;
DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. AP:%d "
DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. AP:%d "
"priv:%d write:%d ns:%d sif:%d sctlr.afe: %d \n",
ap, is_priv, is_write, te->ns,
state.scr.sif, state.sctlr.afe);
@@ -471,15 +547,15 @@ MMU::checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
return std::make_shared<PrefetchAbort>(
req->getPC(),
ArmFault::PermissionLL + te->lookupLevel,
state.isStage2, tranMethod);
state.isStage2, tran_method);
} else if (abt | hapAbt) {
stats.permsFaults++;
DPRINTF(TLB, "TLB Fault: Data abort on permission check. AP:%d priv:%d"
DPRINTF(MMU, "MMU Fault: Data abort on permission check. AP:%d priv:%d"
" write:%d\n", ap, is_priv, is_write);
return std::make_shared<DataAbort>(
vaddr, te->domain, is_write,
ArmFault::PermissionLL + te->lookupLevel,
state.isStage2 | !abt, tranMethod);
state.isStage2 | !abt, tran_method);
}
return NoFault;
}
@@ -524,7 +600,7 @@ MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
return std::make_shared<DataAbort>(
vaddr_tainted, te->domain, is_write,
ArmFault::PermissionLL + te->lookupLevel,
state.isStage2, ArmFault::LpaeTran);
state.isStage2, TranMethod::LpaeTran);
}
// Generate an alignment fault for unaligned accesses to device or
@@ -535,10 +611,10 @@ MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
stats.alignFaults++;
return std::make_shared<DataAbort>(
vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : is_write,
ArmFault::AlignmentFault, state.isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
}
}
}
@@ -551,7 +627,7 @@ MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
return std::make_shared<PrefetchAbort>(
vaddr_tainted,
ArmFault::PrefetchUncacheable,
state.isStage2, ArmFault::LpaeTran);
state.isStage2, TranMethod::LpaeTran);
}
}
@@ -573,7 +649,7 @@ MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
if (!grant) {
if (is_fetch) {
stats.permsFaults++;
DPRINTF(TLB, "TLB Fault: Prefetch abort on permission check. "
DPRINTF(MMU, "MMU Fault: Prefetch abort on permission check. "
"ns:%d scr.sif:%d sctlr.afe: %d\n",
te->ns, state.scr.sif, state.sctlr.afe);
// Use PC value instead of vaddr because vaddr might be aligned to
@@ -581,16 +657,16 @@ MMU::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
return std::make_shared<PrefetchAbort>(
req->getPC(),
ArmFault::PermissionLL + te->lookupLevel,
state.isStage2, ArmFault::LpaeTran);
state.isStage2, TranMethod::LpaeTran);
} else {
stats.permsFaults++;
DPRINTF(TLB, "TLB Fault: Data abort on permission check."
DPRINTF(MMU, "MMU Fault: Data abort on permission check."
"ns:%d", te->ns);
return std::make_shared<DataAbort>(
vaddr_tainted, te->domain,
(is_atomic && !grant_read) ? false : is_write,
ArmFault::PermissionLL + te->lookupLevel,
state.isStage2, ArmFault::LpaeTran);
state.isStage2, TranMethod::LpaeTran);
}
}
@@ -619,7 +695,7 @@ MMU::s2PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
xn = true;
}
DPRINTF(TLBVerbose,
DPRINTF(MMU,
"Checking S2 permissions: hap:%d, xn:%d, pxn:%d, r:%d, "
"w:%d, x:%d\n", te->hap, xn, pxn, r, w, x);
@@ -651,7 +727,7 @@ MMU::s1PermBits64(TlbEntry *te, const RequestPtr &req, Mode mode,
uint8_t xn = te->xn;
uint8_t pxn = te->pxn;
DPRINTF(TLBVerbose, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
DPRINTF(MMU, "Checking S1 permissions: ap:%d, xn:%d, pxn:%d, r:%d, "
"w:%d, x:%d, is_priv: %d, wxn: %d\n", ap, xn,
pxn, r, w, x, is_priv, wxn);
@@ -825,13 +901,13 @@ MMU::translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode,
if (is_fetch)
f = std::make_shared<PrefetchAbort>(vaddr,
ArmFault::AddressSizeLL, state.isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
else
f = std::make_shared<DataAbort>( vaddr,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : mode==Write,
ArmFault::AddressSizeLL, state.isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
return f;
}
}
@@ -867,7 +943,7 @@ MMU::translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode,
temp_te.outerShareable = false;
}
temp_te.setAttributes(long_desc_format);
DPRINTF(TLBVerbose, "(No MMU) setting memory attributes: shareable: "
DPRINTF(MMU, "(No MMU) setting memory attributes: shareable: "
"%d, innerAttrs: %d, outerAttrs: %d, stage2: %d\n",
temp_te.shareable, temp_te.innerAttrs, temp_te.outerAttrs,
state.isStage2);
@@ -880,7 +956,7 @@ Fault
MMU::translateMmuOn(ThreadContext* tc, const RequestPtr &req, Mode mode,
Translation *translation, bool &delay, bool timing,
bool functional, Addr vaddr,
ArmFault::TranMethod tranMethod, CachedState &state)
TranMethod tran_method, CachedState &state)
{
TlbEntry *te = NULL;
bool is_fetch = (mode == Execute);
@@ -898,7 +974,7 @@ MMU::translateMmuOn(ThreadContext* tc, const RequestPtr &req, Mode mode,
// request that triggered the translation
if (isCompleteTranslation(te)) {
// Set memory attributes
DPRINTF(TLBVerbose,
DPRINTF(MMU,
"Setting memory attributes: shareable: %d, innerAttrs: %d, "
"outerAttrs: %d, mtype: %d, stage2: %d\n",
te->shareable, te->innerAttrs, te->outerAttrs,
@@ -931,9 +1007,9 @@ MMU::translateMmuOn(ThreadContext* tc, const RequestPtr &req, Mode mode,
bool is_write = (mode == Write);
return std::make_shared<DataAbort>(
vaddr_tainted,
TlbEntry::DomainType::NoAccess, is_write,
DomainType::NoAccess, is_write,
ArmFault::AlignmentFault, state.isStage2,
tranMethod);
tran_method);
}
if (fault == NoFault)
@@ -965,16 +1041,16 @@ MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
bool is_fetch = (mode == Execute);
bool is_write = (mode == Write);
bool long_desc_format = state.aarch64 || longDescFormatInUse(tc);
ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
: ArmFault::VmsaTran;
TranMethod tran_method = long_desc_format ?
TranMethod::LpaeTran : TranMethod::VmsaTran;
DPRINTF(TLBVerbose,
DPRINTF(MMU,
"CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
state.isPriv, flags & UserMode,
state.securityState == SecurityState::Secure,
tran_type & S1S2NsTran);
DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
DPRINTF(MMU, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
"flags %#lx tranType 0x%x\n", vaddr_tainted, mode,
state.isStage2, state.scr, state.sctlr, flags, tran_type);
@@ -993,9 +1069,9 @@ MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
stats.alignFaults++;
return std::make_shared<DataAbort>(
vaddr_tainted,
TlbEntry::DomainType::NoAccess, is_write,
DomainType::NoAccess, is_write,
ArmFault::AlignmentFault, state.isStage2,
tranMethod);
tran_method);
}
}
}
@@ -1013,11 +1089,11 @@ MMU::translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
fault = translateMmuOff(tc, req, mode, tran_type, vaddr,
long_desc_format, state);
} else {
DPRINTF(TLBVerbose, "Translating %s=%#x context=%d\n",
DPRINTF(MMU, "Translating %s=%#x context=%d\n",
state.isStage2 ? "IPA" : "VA", vaddr_tainted, state.asid);
// Translation enabled
fault = translateMmuOn(tc, req, mode, translation, delay, timing,
functional, vaddr, tranMethod, state);
functional, vaddr, tran_method, state);
}
// Check for Debug Exceptions
@@ -1119,7 +1195,7 @@ MMU::translateComplete(const RequestPtr &req, ThreadContext *tc,
else
fault = translateSe(req, tc, mode, translation, delay, true, state);
DPRINTF(TLBVerbose, "Translation returning delay=%d fault=%d\n", delay,
DPRINTF(MMU, "Translation returning delay=%d fault=%d\n", delay,
fault != NoFault);
// If we have a translation, and we're not in the middle of doing a stage
// 2 translation tell the translation that we've either finished or its
@@ -1180,7 +1256,7 @@ MMU::updateMiscReg(ThreadContext *tc,
((tran_type == state.curTranType) || stage2)) {
} else {
DPRINTF(TLBVerbose, "TLB variables changed!\n");
DPRINTF(MMU, "MMU variables changed!\n");
state.updateMiscReg(tc, tran_type);
itbStage2->setVMID(state.vmid);
@@ -1396,7 +1472,7 @@ MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, SecurityState ss,
{
TLB *tlb = getTlb(mode, stage2);
TlbEntry::Lookup lookup_data;
TlbEntry::KeyType lookup_data;
lookup_data.va = va;
lookup_data.asn = asid;
@@ -1448,7 +1524,7 @@ MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode,
// start translation table walk, pass variables rather than
// re-retreaving in table walker for speed
DPRINTF(TLB,
DPRINTF(MMU,
"TLB Miss: Starting hardware table walker for %#x(%d:%d)\n",
vaddr_tainted, state.asid, state.vmid);
@@ -1534,7 +1610,7 @@ MMU::getResultTe(TlbEntry **te, const RequestPtr &req,
// This case deals with an S1 hit (or bypass), followed by
// an S2 hit-but-perms issue
if (state.isStage2) {
DPRINTF(TLBVerbose, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
DPRINTF(MMU, "s2TLB: reqVa %#x, reqPa %#x, fault %p\n",
vaddr_tainted, req->hasPaddr() ? req->getPaddr() : ~0,
fault);
if (fault != NoFault) {
@@ -1587,7 +1663,7 @@ MMU::setTestInterface(SimObject *_ti)
Fault
MMU::testTranslation(const RequestPtr &req, Mode mode,
TlbEntry::DomainType domain, CachedState &state) const
DomainType domain, CachedState &state) const
{
if (!test || !req->hasSize() || req->getSize() == 0 ||
req->isCacheMaintenance()) {

View File

@@ -42,10 +42,10 @@
#define __ARCH_ARM_MMU_HH__
#include "arch/arm/page_size.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/utility.hh"
#include "arch/generic/mmu.hh"
#include "base/memoizer.hh"
#include "base/statistics.hh"
#include "enums/ArmLookupLevel.hh"
#include "params/ArmMMU.hh"
@@ -56,23 +56,18 @@ namespace gem5
namespace ArmISA {
class TableWalker;
class TLB;
class TlbEntry;
class TLBIOp;
class TlbTestInterface;
class MMU : public BaseMMU
{
protected:
using LookupLevel = enums::ArmLookupLevel;
ArmISA::TLB *
getDTBPtr() const
{
return static_cast<ArmISA::TLB *>(dtb);
}
ArmISA::TLB *
getITBPtr() const
{
return static_cast<ArmISA::TLB *>(itb);
}
ArmISA::TLB * getDTBPtr() const;
ArmISA::TLB * getITBPtr() const;
TLB * getTlb(BaseMMU::Mode mode, bool stage2) const;
TableWalker * getTableWalker(BaseMMU::Mode mode, bool stage2) const;
@@ -270,7 +265,7 @@ class MMU : public BaseMMU
CachedState &state);
Fault translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode,
Translation *translation, bool &delay, bool timing, bool functional,
Addr vaddr, ArmFault::TranMethod tranMethod,
Addr vaddr, TranMethod tran_method,
CachedState &state);
Fault translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
@@ -297,73 +292,13 @@ class MMU : public BaseMMU
void invalidateMiscReg();
template <typename OP>
void
flush(const OP &tlbi_op)
{
if (tlbi_op.stage1Flush()) {
flushStage1(tlbi_op);
}
void flush(const TLBIOp &tlbi_op);
void flushStage1(const TLBIOp &tlbi_op);
void flushStage2(const TLBIOp &tlbi_op);
void iflush(const TLBIOp &tlbi_op);
void dflush(const TLBIOp &tlbi_op);
if (tlbi_op.stage2Flush()) {
flushStage2(tlbi_op);
}
}
template <typename OP>
void
flushStage1(const OP &tlbi_op)
{
for (auto tlb : instruction) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
for (auto tlb : data) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
for (auto tlb : unified) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
}
template <typename OP>
void
flushStage2(const OP &tlbi_op)
{
itbStage2->flush(tlbi_op);
dtbStage2->flush(tlbi_op);
}
template <typename OP>
void
iflush(const OP &tlbi_op)
{
for (auto tlb : instruction) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
for (auto tlb : unified) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
}
template <typename OP>
void
dflush(const OP &tlbi_op)
{
for (auto tlb : data) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
for (auto tlb : unified) {
static_cast<TLB*>(tlb)->flush(tlbi_op);
}
}
void
flushAll() override
{
BaseMMU::flushAll();
itbStage2->flushAll();
dtbStage2->flushAll();
}
void flushAll() override;
uint64_t
getAttr() const
@@ -462,7 +397,7 @@ class MMU : public BaseMMU
void setTestInterface(SimObject *ti);
Fault testTranslation(const RequestPtr &req, Mode mode,
TlbEntry::DomainType domain, CachedState &state) const;
DomainType domain, CachedState &state) const;
protected:
bool checkWalkCache() const;

View File

@@ -489,5 +489,14 @@ getPageTableOps(GrainSize trans_granule)
}
}
// Build a lookup key that re-finds exactly one existing TLB entry.
// The virtual address is reconstructed from the entry's VPN and its
// page-size shift N; ASID, VMID, security state and translation
// regime are copied verbatim. size == 0 makes this a single-address
// (non-range) lookup, and ignoreAsn == false forces the ASID to be
// compared in TlbEntry::match(). mode is fixed to Read; the mode is
// not part of the matching logic visible here — presumably only used
// for stats/permissions elsewhere (TODO confirm).
TLBTypes::KeyType::KeyType(const TlbEntry &entry)
    : va(entry.vpn << entry.N), pageSize(entry.N), size(0),
      asn(entry.asid), ignoreAsn(false),
      vmid(entry.vmid), ss(entry.ss),
      functional(false),
      targetRegime(entry.regime),
      mode(BaseMMU::Read)
{}
} // namespace ArmISA
} // namespace gem5

View File

@@ -49,6 +49,10 @@
#include "arch/generic/mmu.hh"
#include "enums/TypeTLB.hh"
#include "enums/ArmLookupLevel.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"
#include "mem/cache/tags/indexing_policies/base.hh"
#include "params/TLBIndexingPolicy.hh"
#include "params/TLBSetAssociative.hh"
#include "sim/serialize.hh"
namespace gem5
@@ -161,31 +165,20 @@ struct V8PageTableOps64k : public PageTableOps
LookupLevel lastLevel() const override;
};
// ITB/DTB table entry
struct TlbEntry : public Serializable
struct TlbEntry;
class TLBTypes
{
public:
typedef enums::ArmLookupLevel LookupLevel;
enum class MemoryType : std::uint8_t
struct KeyType
{
StronglyOrdered,
Device,
Normal
};
KeyType() = default;
explicit KeyType(const TlbEntry &entry);
enum class DomainType : std::uint8_t
{
NoAccess = 0,
Client,
Reserved,
Manager
};
struct Lookup
{
// virtual address
Addr va = 0;
// page size
Addr pageSize = Grain4KB;
// lookup size:
// * != 0 -> this is a range based lookup.
// end_address = va + size
@@ -208,6 +201,48 @@ struct TlbEntry : public Serializable
BaseMMU::Mode mode = BaseMMU::Read;
};
using Params = TLBIndexingPolicyParams;
};
using TLBIndexingPolicy = IndexingPolicyTemplate<TLBTypes>;
/**
 * Set-associative indexing policy for the Arm TLB.
 *
 * The set index is derived from the virtual page number: the lookup
 * virtual address is shifted right by the page-size shift
 * (key.pageSize, i.e. log2 of the page size in bytes) and masked with
 * setMask. With assoc == num_entries this degenerates to a fully
 * associative TLB (single set).
 */
class TLBSetAssociative : public TLBIndexingPolicy
{
  public:
    PARAMS(TLBSetAssociative);
    // The trailing 0 is the entry-size argument of the base indexing
    // policy ctor — presumably unused for TLB entries, which are not
    // byte-addressable like cache blocks (TODO confirm against
    // IndexingPolicyTemplate).
    TLBSetAssociative(const Params &p)
      : TLBIndexingPolicy(p, p.num_entries, 0)
    {}

    /** Return all ways of the set selected by the key's VPN. */
    std::vector<ReplaceableEntry*>
    getPossibleEntries(const KeyType &key) const override
    {
        Addr set_number = (key.va >> key.pageSize) & setMask;
        return sets[set_number];
    }

    /**
     * Address regeneration is never needed for the TLB: a TlbEntry
     * stores its own vpn/N and can rebuild its address itself.
     */
    Addr
    regenerateAddr(const KeyType &key,
                   const ReplaceableEntry *entry) const override
    {
        panic("Unimplemented\n");
    }
};
// ITB/DTB table entry
struct TlbEntry : public ReplaceableEntry, Serializable
{
public:
using LookupLevel = enums::ArmLookupLevel;
using KeyType = TLBTypes::KeyType;
using IndexingPolicy = TLBIndexingPolicy;
enum class MemoryType : std::uint8_t
{
StronglyOrdered,
Device,
Normal
};
// Matching variables
Addr pfn;
Addr size; // Size of this entry, == Type of TLB Rec
@@ -305,6 +340,59 @@ struct TlbEntry : public Serializable
// @todo Check the memory type
}
// Member-wise copy is correct: a TlbEntry owns no resources.
TlbEntry(const TlbEntry &rhs) = default;

// Copy-and-swap assignment: rhs arrives by value (already a copy of
// the source), and its state is exchanged into *this via swap().
TlbEntry& operator=(TlbEntry rhs)
{
    swap(rhs);
    return *this;
}
/**
 * Exchange the contents of two entries member by member; used by the
 * copy-and-swap assignment operator.
 *
 * NOTE(review): ipaSpace (a TlbEntry member matched by
 * TLBIIPA::matchEntry) does not appear in this swap list — confirm
 * whether it should be exchanged too, otherwise copy-and-swap
 * assignment leaves the destination's ipaSpace unchanged.
 */
void
swap(TlbEntry &rhs)
{
    std::swap(pfn, rhs.pfn);
    std::swap(size, rhs.size);
    std::swap(vpn, rhs.vpn);
    std::swap(attributes, rhs.attributes);
    std::swap(lookupLevel, rhs.lookupLevel);
    std::swap(asid, rhs.asid);
    std::swap(vmid, rhs.vmid);
    std::swap(tg, rhs.tg);
    std::swap(N, rhs.N);
    std::swap(innerAttrs, rhs.innerAttrs);
    std::swap(outerAttrs, rhs.outerAttrs);
    std::swap(ap, rhs.ap);
    std::swap(hap, rhs.hap);
    std::swap(domain, rhs.domain);
    std::swap(mtype, rhs.mtype);
    std::swap(longDescFormat, rhs.longDescFormat);
    std::swap(global, rhs.global);
    std::swap(valid, rhs.valid);
    std::swap(ns, rhs.ns);
    std::swap(ss, rhs.ss);
    std::swap(regime, rhs.regime);
    std::swap(type, rhs.type);
    std::swap(partial, rhs.partial);
    std::swap(nonCacheable, rhs.nonCacheable);
    std::swap(shareable, rhs.shareable);
    std::swap(outerShareable, rhs.outerShareable);
    std::swap(xn, rhs.xn);
    std::swap(pxn, rhs.pxn);
    std::swap(xs, rhs.xs);
}
/**
 * Needed for compliance with the AssociativeCache interface:
 * mark the entry as not holding a translation.
 */
void
invalidate()
{
    valid = false;
}

/**
 * Needed for compliance with the AssociativeCache interface.
 * Intentionally a no-op: the table walker fills in all entry fields
 * itself, so nothing is derived from the key on insertion.
 */
void insert(const KeyType &key) {}

/** Needed for compliance with the AssociativeCache interface. */
bool isValid() const { return valid; }
void
updateVaddr(Addr new_vaddr)
@@ -319,32 +407,32 @@ struct TlbEntry : public Serializable
}
bool
matchAddress(const Lookup &lookup) const
matchAddress(const KeyType &key) const
{
Addr page_addr = vpn << N;
if (lookup.size) {
if (key.size) {
// This is a range based loookup
return lookup.va <= page_addr + size &&
lookup.va + lookup.size > page_addr;
return key.va <= page_addr + size &&
key.va + key.size > page_addr;
} else {
// This is a normal lookup
return lookup.va >= page_addr && lookup.va <= page_addr + size;
return key.va >= page_addr && key.va <= page_addr + size;
}
}
bool
match(const Lookup &lookup) const
match(const KeyType &key) const
{
bool match = false;
if (valid && matchAddress(lookup) && lookup.ss == ss)
if (valid && matchAddress(key) && key.ss == ss)
{
match = checkRegime(lookup.targetRegime);
match = checkRegime(key.targetRegime);
if (match && !lookup.ignoreAsn) {
match = global || (lookup.asn == asid);
if (match && !key.ignoreAsn) {
match = global || (key.asn == asid);
}
if (match && useVMID(lookup.targetRegime)) {
match = lookup.vmid == vmid;
if (match && useVMID(key.targetRegime)) {
match = key.vmid == vmid;
}
}
return match;
@@ -487,6 +575,9 @@ struct TlbEntry : public Serializable
const PageTableOps *getPageTableOps(GrainSize trans_granule);
} // namespace ArmISA
template class IndexingPolicyTemplate<ArmISA::TLBTypes>;
} // namespace gem5
#endif // __ARCH_ARM_PAGETABLE_H__

View File

@@ -110,7 +110,7 @@ SelfDebug::triggerException(ThreadContext *tc, Addr vaddr)
if (to32) {
return std::make_shared<PrefetchAbort>(vaddr,
ArmFault::DebugEvent, false,
ArmFault::UnknownTran,
TranMethod::UnknownTran,
ArmFault::BRKPOINT);
} else {
return std::make_shared<HardwareBreakpoint>(vaddr, 0x22);
@@ -145,9 +145,9 @@ SelfDebug::triggerWatchpointException(ThreadContext *tc, Addr vaddr,
ArmFault::DebugType d = cm? ArmFault::WPOINT_CM:
ArmFault::WPOINT_NOCM;
return std::make_shared<DataAbort>(vaddr,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
write, ArmFault::DebugEvent, cm,
ArmFault::UnknownTran, d);
TranMethod::UnknownTran, d);
} else {
return std::make_shared<Watchpoint>(0, vaddr, write, cm);
}

View File

@@ -40,7 +40,6 @@
#define __ARCH_ARM_SELF_DEBUG_HH__
#include "arch/arm/faults.hh"
#include "arch/arm/regs/misc.hh"
#include "arch/arm/system.hh"
#include "arch/arm/types.hh"

View File

@@ -640,14 +640,14 @@ TableWalker::processWalk()
currState->vaddr_tainted,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
else
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + LookupLevel::L1, isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
}
ttbr = currState->tc->readMiscReg(snsBankedIndex(
MISCREG_TTBR0, currState->tc,
@@ -661,14 +661,14 @@ TableWalker::processWalk()
currState->vaddr_tainted,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
else
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + LookupLevel::L1, isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
}
ttbr = ttbr1;
currState->ttbcr.n = 0;
@@ -758,15 +758,15 @@ TableWalker::processWalkLPAE()
currState->vaddr_tainted,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
else
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
}
ttbr = currState->tc->readMiscReg(snsBankedIndex(
MISCREG_TTBR0, currState->tc,
@@ -784,15 +784,15 @@ TableWalker::processWalkLPAE()
currState->vaddr_tainted,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
else
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
}
ttbr = currState->tc->readMiscReg(snsBankedIndex(
MISCREG_TTBR1, currState->tc,
@@ -809,14 +809,14 @@ TableWalker::processWalkLPAE()
currState->vaddr_tainted,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
else
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2, ArmFault::LpaeTran);
isStage2, TranMethod::LpaeTran);
}
}
@@ -1074,14 +1074,14 @@ TableWalker::processWalkAArch64()
return std::make_shared<PrefetchAbort>(
currState->vaddr_tainted,
ArmFault::TranslationLL + LookupLevel::L0, isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
} else {
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + LookupLevel::L0,
isStage2, ArmFault::LpaeTran);
isStage2, TranMethod::LpaeTran);
}
}
@@ -1111,15 +1111,15 @@ TableWalker::processWalkAArch64()
currState->vaddr_tainted,
ArmFault::AddressSizeLL + start_lookup_level,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
else
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::AddressSizeLL + start_lookup_level,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
}
Request::Flags flag = Request::PT_WALK;
@@ -1687,15 +1687,15 @@ TableWalker::doL1Descriptor()
currState->vaddr_tainted,
ArmFault::TranslationLL + LookupLevel::L1,
isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
else
currState->fault =
std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + LookupLevel::L1, isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
return;
case L1Descriptor::Section:
if (currState->sctlr.afe && bits(currState->l1Desc.ap(), 0) == 0) {
@@ -1710,7 +1710,7 @@ TableWalker::doL1Descriptor()
is_atomic ? false : currState->isWrite,
ArmFault::AccessFlagLL + LookupLevel::L1,
isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
}
if (currState->l1Desc.supersection()) {
panic("Haven't implemented supersections\n");
@@ -1758,15 +1758,15 @@ TableWalker::generateLongDescFault(ArmFault::FaultSource src)
currState->vaddr_tainted,
src + currState->longDesc.lookupLevel,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
} else {
return std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
currState->req->isAtomic() ? false : currState->isWrite,
src + currState->longDesc.lookupLevel,
isStage2,
ArmFault::LpaeTran);
TranMethod::LpaeTran);
}
}
@@ -1953,14 +1953,14 @@ TableWalker::doL2Descriptor()
currState->vaddr_tainted,
ArmFault::TranslationLL + LookupLevel::L2,
isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
else
currState->fault = std::make_shared<DataAbort>(
currState->vaddr_tainted, currState->l1Desc.domain(),
is_atomic ? false : currState->isWrite,
ArmFault::TranslationLL + LookupLevel::L2,
isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
return;
}
@@ -1973,10 +1973,10 @@ TableWalker::doL2Descriptor()
currState->fault = std::make_shared<DataAbort>(
currState->vaddr_tainted,
TlbEntry::DomainType::NoAccess,
DomainType::NoAccess,
is_atomic ? false : currState->isWrite,
ArmFault::AccessFlagLL + LookupLevel::L2, isStage2,
ArmFault::VmsaTran);
TranMethod::VmsaTran);
}
insertTableEntry(currState->l2Desc, false);
@@ -2312,7 +2312,7 @@ TableWalker::insertPartialTableEntry(LongDescriptor &descriptor)
descriptor.getRawData());
// Insert the entry into the TLBs
tlb->multiInsert(te);
tlb->multiInsert(TlbEntry::KeyType(te), te);
}
void
@@ -2386,7 +2386,7 @@ TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor)
descriptor.getRawData());
// Insert the entry into the TLBs
tlb->multiInsert(te);
tlb->multiInsert(TlbEntry::KeyType(te), te);
if (!currState->timing) {
currState->tc = NULL;
currState->req = NULL;
@@ -2427,7 +2427,7 @@ TableWalker::pendingChange()
}
Fault
TableWalker::testWalk(const RequestPtr &walk_req, TlbEntry::DomainType domain,
TableWalker::testWalk(const RequestPtr &walk_req, DomainType domain,
LookupLevel lookup_level)
{
if (!test) {

View File

@@ -79,7 +79,7 @@ class TableWalker : public ClockedObject
LookupLevel lookupLevel;
virtual Addr pfn() const = 0;
virtual TlbEntry::DomainType domain() const = 0;
virtual DomainType domain() const = 0;
virtual bool xn() const = 0;
virtual uint8_t ap() const = 0;
virtual bool global(WalkerState *currState) const = 0;
@@ -209,10 +209,10 @@ class TableWalker : public ClockedObject
}
/** Domain Client/Manager: ARM DDI 0406B: B3-31 */
TlbEntry::DomainType
DomainType
domain() const override
{
return static_cast<TlbEntry::DomainType>(bits(data, 8, 5));
return static_cast<DomainType>(bits(data, 8, 5));
}
/** Address of L2 descriptor if it exists */
@@ -316,7 +316,7 @@ class TableWalker : public ClockedObject
return "Inserting L2 Descriptor into TLB\n";
}
TlbEntry::DomainType
DomainType
domain() const override
{
return l1Parent->domain();
@@ -744,11 +744,11 @@ class TableWalker : public ClockedObject
return ((!rw) << 2) | (user << 1);
}
TlbEntry::DomainType
DomainType
domain() const override
{
// Long-desc. format only supports Client domain
return TlbEntry::DomainType::Client;
return DomainType::Client;
}
/** Attribute index */
@@ -1249,7 +1249,7 @@ class TableWalker : public ClockedObject
void setTestInterface(TlbTestInterface *ti);
Fault testWalk(const RequestPtr &walk_req, TlbEntry::DomainType domain,
Fault testWalk(const RequestPtr &walk_req, DomainType domain,
LookupLevel lookup_level);
};

View File

@@ -58,8 +58,41 @@ namespace gem5
using namespace ArmISA;
TlbEntry*
TLB::Table::accessEntry(const KeyType &key)
{
    TlbEntry *match = findEntry(key);
    // Functional (debug) lookups must not perturb replacement state,
    // so only touch the underlying cache entry on a real access.
    if (match != nullptr && !key.functional)
        accessEntry(match);
    return match;
}
TlbEntry*
TLB::Table::findEntry(const KeyType &key) const
{
    // Probe every way of the set this key indexes into.
    for (ReplaceableEntry *slot : indexingPolicy->getPossibleEntries(key)) {
        TlbEntry *entry = static_cast<TlbEntry*>(slot);
        // The page size is compared here rather than inside
        // TlbEntry::match, since match() is also used by TLBI
        // invalidation, which must ignore the page size.
        if (entry->N == key.pageSize && entry->match(key))
            return entry;
    }
    return nullptr;
}
TLB::TLB(const ArmTLBParams &p)
: BaseTLB(p), table(new TlbEntry[p.size]), size(p.size),
: BaseTLB(p),
table(name().c_str(), p.size, p.assoc,
p.replacement_policy, p.indexing_policy),
size(p.size),
isStage2(p.is_stage2),
_walkCache(false),
tableWalker(nullptr),
@@ -88,11 +121,12 @@ TLB::TLB(const ArmTLBParams &p)
partialLevels[lookup_lvl] = false;
}
}
table.setDebugFlag(::gem5::debug::TLB);
}
TLB::~TLB()
{
delete[] table;
}
void
@@ -103,62 +137,19 @@ TLB::setTableWalker(TableWalker *table_walker)
}
TlbEntry*
TLB::match(const Lookup &lookup_data)
{
// Vector of TLB entry candidates.
// Only one of them will be assigned to retval and will
// be returned to the MMU (in case of a hit)
// The vector has one entry per lookup level as it stores
// both complete and partial matches
std::vector<std::pair<int, const TlbEntry*>> hits{
LookupLevel::Num_ArmLookupLevel, {0, nullptr}};
int x = 0;
while (x < size) {
if (table[x].match(lookup_data)) {
const TlbEntry &entry = table[x];
hits[entry.lookupLevel] = std::make_pair(x, &entry);
// This is a complete translation, no need to loop further
if (!entry.partial)
break;
}
++x;
}
// Loop over the list of TLB entries matching our translation
// request, starting from the highest lookup level (complete
// translation) and iterating backwards (using reverse iterators)
for (auto it = hits.rbegin(); it != hits.rend(); it++) {
const auto& [idx, entry] = *it;
if (!entry) {
// No match for the current LookupLevel
continue;
}
// Maintaining LRU array
// We only move the hit entry ahead when the position is higher
// than rangeMRU
if (idx > rangeMRU && !lookup_data.functional) {
TlbEntry tmp_entry = *entry;
for (int i = idx; i > 0; i--)
table[i] = table[i - 1];
table[0] = tmp_entry;
return &table[0];
} else {
return &table[idx];
}
}
return nullptr;
}
TlbEntry*
TLB::lookup(const Lookup &lookup_data)
TLB::lookup(Lookup lookup_data)
{
const auto mode = lookup_data.mode;
TlbEntry *retval = match(lookup_data);
TlbEntry *retval = nullptr;
// We iterate over all stored sizes, starting from the
// smallest until the biggest. In this way we prioritize
// complete translations over partial translations
for (const auto &page_size : observedPageSizes) {
lookup_data.pageSize = page_size;
if (retval = table.accessEntry(lookup_data); retval)
break;
}
DPRINTF(TLBVerbose, "Lookup %#x, asn %#x -> %s vmn 0x%x ss %s "
"ppn %#x size: %#x pa: %#x ap:%d ns:%d ss:%s g:%d asid: %d "
@@ -217,7 +208,7 @@ TLB::multiLookup(const Lookup &lookup_data)
// Insert entry only if this is not a functional
// lookup and if the translation is complete (unless this
// TLB caches partial translations)
insert(*te);
insert(lookup_data, *te);
}
}
}
@@ -240,59 +231,40 @@ TLB::checkPromotion(TlbEntry *entry, BaseMMU::Mode mode)
// insert a new TLB entry
void
TLB::insert(TlbEntry &entry)
TLB::insert(const Lookup &lookup_data, TlbEntry &entry)
{
DPRINTF(TLB, "Inserting entry into TLB with pfn:%#x size:%#x vpn: %#x"
" asid:%d vmid:%d N:%d global:%d valid:%d nc:%d xn:%d"
" ap:%#x domain:%#x ns:%d ss:%s xs:%d regime: %s\n", entry.pfn,
entry.size, entry.vpn, entry.asid, entry.vmid, entry.N,
entry.global, entry.valid, entry.nonCacheable, entry.xn,
entry.ap, static_cast<uint8_t>(entry.domain), entry.ns,
entry.ss, entry.xs, regimeToStr(entry.regime));
TlbEntry *victim = table.findVictim(lookup_data);
if (table[size - 1].valid)
DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x "
"size: %#x ap:%d ns:%d ss:%s g:%d xs:%d regime: %s\n",
table[size-1].vpn << table[size-1].N, table[size-1].asid,
table[size-1].vmid, table[size-1].pfn << table[size-1].N,
table[size-1].size, table[size-1].ap, table[size-1].ns,
table[size-1].ss, table[size-1].global,
table[size-1].xs, regimeToStr(table[size-1].regime));
*victim = entry;
// inserting to MRU position and evicting the LRU one
for (int i = size - 1; i > 0; --i)
table[i] = table[i-1];
table[0] = entry;
table.insertEntry(lookup_data, victim);
observedPageSizes.insert(entry.N);
stats.inserts++;
ppRefills->notify(1);
}
void
TLB::multiInsert(TlbEntry &entry)
TLB::multiInsert(const Lookup &lookup, TlbEntry &entry)
{
// Insert a partial translation only if the TLB is configured
// as a walk cache
if (!entry.partial || partialLevels[entry.lookupLevel]) {
insert(entry);
insert(lookup, entry);
}
if (auto next_level = static_cast<TLB*>(nextLevel())) {
next_level->multiInsert(entry);
next_level->multiInsert(lookup, entry);
}
}
void
TLB::printTlb() const
{
int x = 0;
TlbEntry *te;
DPRINTF(TLB, "Current TLB contents:\n");
while (x < size) {
te = &table[x];
if (te->valid)
DPRINTF(TLB, " * %s\n", te->print());
++x;
for (auto& te : table) {
if (te.valid)
DPRINTF(TLB, " * %s\n", te.print());
}
}
@@ -300,38 +272,34 @@ void
TLB::flushAll()
{
DPRINTF(TLB, "Flushing all TLB entries\n");
int x = 0;
TlbEntry *te;
while (x < size) {
te = &table[x];
if (te->valid) {
DPRINTF(TLB, " - %s\n", te->print());
te->valid = false;
for (auto& te : table) {
if (te.valid) {
DPRINTF(TLB, " - %s\n", te.print());
table.invalidate(&te);
stats.flushedEntries++;
}
++x;
}
stats.flushTlb++;
observedPageSizes.clear();
}
void
TLB::flush(const TLBIOp& tlbi_op)
{
int x = 0;
TlbEntry *te;
while (x < size) {
te = &table[x];
if (tlbi_op.match(te, vmid)) {
DPRINTF(TLB, " - %s\n", te->print());
te->valid = false;
bool valid_entry = false;
for (auto& te : table) {
if (tlbi_op.match(&te, vmid)) {
DPRINTF(TLB, " - %s\n", te.print());
table.invalidate(&te);
stats.flushedEntries++;
}
++x;
valid_entry = valid_entry || te.valid;
}
stats.flushTlb++;
if (!valid_entry)
observedPageSizes.clear();
}
void

View File

@@ -46,6 +46,7 @@
#include "arch/arm/pagetable.hh"
#include "arch/arm/utility.hh"
#include "arch/generic/tlb.hh"
#include "base/cache/associative_cache.hh"
#include "base/statistics.hh"
#include "enums/TypeTLB.hh"
#include "mem/request.hh"
@@ -79,7 +80,7 @@ class TlbTestInterface
*/
virtual Fault translationCheck(const RequestPtr &req, bool is_priv,
BaseMMU::Mode mode,
TlbEntry::DomainType domain) = 0;
DomainType domain) = 0;
/**
* Check if a page table walker access should be forced to fail.
@@ -95,14 +96,21 @@ class TlbTestInterface
virtual Fault walkCheck(const RequestPtr &walk_req,
Addr va, bool is_secure,
Addr is_priv, BaseMMU::Mode mode,
TlbEntry::DomainType domain,
DomainType domain,
enums::ArmLookupLevel lookup_level) = 0;
};
class TLB : public BaseTLB
{
protected:
TlbEntry* table;
class Table : public AssociativeCache<TlbEntry>
{
public:
using AssociativeCache<TlbEntry>::AssociativeCache;
using AssociativeCache<TlbEntry>::accessEntry;
TlbEntry* accessEntry(const KeyType &key) override;
TlbEntry* findEntry(const KeyType &key) const override;
} table;
/** TLB Size */
int size;
@@ -156,9 +164,19 @@ class TLB : public BaseTLB
int rangeMRU; //On lookup, only move entries ahead when outside rangeMRU
vmid_t vmid;
/** Set of observed page sizes in the TLB
* We update the set conservatively, therefore allowing
* false positives but not false negatives.
* This means there could be a stored page size with
* no matching TLB entry (e.g. it has been invalidated),
* but if the page size is not in the set, we are certain
* there is no associated TLB with that size
*/
std::set<Addr> observedPageSizes;
public:
using Params = ArmTLBParams;
using Lookup = TlbEntry::Lookup;
using Lookup = TlbEntry::KeyType;
using LookupLevel = enums::ArmLookupLevel;
TLB(const Params &p);
@@ -167,7 +185,7 @@ class TLB : public BaseTLB
/** Lookup an entry in the TLB
* @return pointer to TLB entry if it exists
*/
TlbEntry *lookup(const Lookup &lookup_data);
TlbEntry *lookup(Lookup lookup_data);
/** Lookup an entry in the TLB and in the next levels by
* following the nextLevel pointer
@@ -192,10 +210,10 @@ class TLB : public BaseTLB
void setVMID(vmid_t _vmid) { vmid = _vmid; }
/** Insert a PTE in the current TLB */
void insert(TlbEntry &pte);
void insert(const Lookup &lookup_data, TlbEntry &pte);
/** Insert a PTE in the current TLB and in the higher levels */
void multiInsert(TlbEntry &pte);
void multiInsert(const Lookup &lookup_data, TlbEntry &pte);
/** Reset the entire TLB. Used for CPU switching to prevent stale
* translations after multiple switches
@@ -275,10 +293,6 @@ class TLB : public BaseTLB
* data access or a data TLB entry on an instruction access:
*/
void checkPromotion(TlbEntry *entry, BaseMMU::Mode mode);
/** Helper function looking up for a matching TLB entry
* Does not update stats; see lookup method instead */
TlbEntry *match(const Lookup &lookup_data);
};
} // namespace ArmISA

View File

@@ -206,10 +206,10 @@ TLBIALLN::matchEntry(TlbEntry* te, vmid_t vmid) const
te->checkRegime(targetRegime);
}
TlbEntry::Lookup
TlbEntry::KeyType
TLBIMVAA::lookupGen(vmid_t vmid) const
{
TlbEntry::Lookup lookup_data;
TlbEntry::KeyType lookup_data;
lookup_data.va = sext<56>(addr);
lookup_data.ignoreAsn = true;
lookup_data.vmid = vmid;
@@ -234,15 +234,15 @@ TLBIMVAA::operator()(ThreadContext* tc)
bool
TLBIMVAA::matchEntry(TlbEntry* te, vmid_t vmid) const
{
TlbEntry::Lookup lookup_data = lookupGen(vmid);
TlbEntry::KeyType lookup_data = lookupGen(vmid);
return te->match(lookup_data) && (!lastLevel || !te->partial);
}
TlbEntry::Lookup
TlbEntry::KeyType
TLBIMVA::lookupGen(vmid_t vmid) const
{
TlbEntry::Lookup lookup_data;
TlbEntry::KeyType lookup_data;
lookup_data.va = sext<56>(addr);
lookup_data.asn = asid;
lookup_data.ignoreAsn = false;
@@ -269,7 +269,7 @@ TLBIMVA::operator()(ThreadContext* tc)
bool
TLBIMVA::matchEntry(TlbEntry* te, vmid_t vmid) const
{
TlbEntry::Lookup lookup_data = lookupGen(vmid);
TlbEntry::KeyType lookup_data = lookupGen(vmid);
return te->match(lookup_data) && (!lastLevel || !te->partial);
}
@@ -309,10 +309,10 @@ TLBIIPA::operator()(ThreadContext* tc)
}
}
TlbEntry::Lookup
TlbEntry::KeyType
TLBIIPA::lookupGen(vmid_t vmid) const
{
TlbEntry::Lookup lookup_data;
TlbEntry::KeyType lookup_data;
lookup_data.va = szext<56>(addr);
lookup_data.ignoreAsn = true;
lookup_data.vmid = vmid;
@@ -326,7 +326,7 @@ TLBIIPA::lookupGen(vmid_t vmid) const
bool
TLBIIPA::matchEntry(TlbEntry* te, vmid_t vmid) const
{
TlbEntry::Lookup lookup_data = lookupGen(vmid);
TlbEntry::KeyType lookup_data = lookupGen(vmid);
return te->match(lookup_data) && (!lastLevel || !te->partial) &&
ipaSpace == te->ipaSpace;
@@ -335,7 +335,7 @@ TLBIIPA::matchEntry(TlbEntry* te, vmid_t vmid) const
bool
TLBIRMVA::matchEntry(TlbEntry* te, vmid_t vmid) const
{
TlbEntry::Lookup lookup_data = lookupGen(vmid);
TlbEntry::KeyType lookup_data = lookupGen(vmid);
lookup_data.size = rangeSize();
auto addr_match = te->match(lookup_data) && (!lastLevel || !te->partial);
@@ -351,7 +351,7 @@ TLBIRMVA::matchEntry(TlbEntry* te, vmid_t vmid) const
bool
TLBIRMVAA::matchEntry(TlbEntry* te, vmid_t vmid) const
{
TlbEntry::Lookup lookup_data = lookupGen(vmid);
TlbEntry::KeyType lookup_data = lookupGen(vmid);
lookup_data.size = rangeSize();
auto addr_match = te->match(lookup_data) && (!lastLevel || !te->partial);
@@ -367,7 +367,7 @@ TLBIRMVAA::matchEntry(TlbEntry* te, vmid_t vmid) const
bool
TLBIRIPA::matchEntry(TlbEntry* te, vmid_t vmid) const
{
TlbEntry::Lookup lookup_data = lookupGen(vmid);
TlbEntry::KeyType lookup_data = lookupGen(vmid);
lookup_data.size = rangeSize();
auto addr_match = te->match(lookup_data) && (!lastLevel || !te->partial);

View File

@@ -279,7 +279,7 @@ class TLBIALLN : public TLBIOp
class TLBIMVAA : public TLBIOp
{
protected:
TlbEntry::Lookup lookupGen(vmid_t vmid) const;
TlbEntry::KeyType lookupGen(vmid_t vmid) const;
public:
TLBIMVAA(TranslationRegime _target_regime, SecurityState _ss,
Addr _addr, bool last_level, Attr _attr=Attr::None)
@@ -299,7 +299,7 @@ class TLBIMVAA : public TLBIOp
class TLBIMVA : public TLBIOp
{
protected:
TlbEntry::Lookup lookupGen(vmid_t vmid) const;
TlbEntry::KeyType lookupGen(vmid_t vmid) const;
public:
TLBIMVA(TranslationRegime _target_regime, SecurityState _ss,
@@ -405,7 +405,7 @@ class TLBIRange
class TLBIIPA : public TLBIOp
{
protected:
TlbEntry::Lookup lookupGen(vmid_t vmid) const;
TlbEntry::KeyType lookupGen(vmid_t vmid) const;
public:
TLBIIPA(TranslationRegime _target_regime, SecurityState _ss, Addr _addr,
bool last_level, Attr _attr=Attr::None)

View File

@@ -282,6 +282,21 @@ namespace ArmISA
Secure
};
// Translation method to report with a fault: LpaeTran for
// long-descriptor (LPAE/AArch64) table walks, VmsaTran for
// short-descriptor (VMSA) walks, UnknownTran when the fault is not
// tied to a specific walk (e.g. debug events).
enum class TranMethod
{
    LpaeTran,
    VmsaTran,
    UnknownTran
};
// Memory-access domain of a translation (Domain Client/Manager,
// ARM DDI 0406B, B3-31). The explicit NoAccess = 0 pins the numeric
// encoding, which is cast directly from descriptor bits.
enum class DomainType : std::uint8_t
{
    NoAccess = 0,
    Client,
    Reserved,
    Manager
};
enum ExceptionLevel
{
EL0 = 0,