arch-arm: Move translation logic from the ArmTLB to the ArmMMU

This patch moves most of the TLB code to the MMU.
In this way, the TLB stops being the main translating agent and becomes
a simple "passive" translation cache.

All the logic behind virtual memory translation, such as:

* Checking permissions/alignment
* Issuing page table walks
* etc.

is now embedded in the MMU model. This will allow us to stack multiple
TLBs and to compose arbitrary hierarchies, as their sole purpose now is
to cache translations.

JIRA: https://gem5.atlassian.net/browse/GEM5-790

Change-Id: I687c639a56263d5e3bb6633dd8c9666c85edba3a
Signed-off-by: Giacomo Travaglini <giacomo.travaglini@arm.com>
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/48141
Tested-by: kokoro <noreply+kokoro@google.com>
This commit is contained in:
Giacomo Travaglini
2021-06-09 15:04:56 +01:00
parent 0d998a3c53
commit 870f93301f
24 changed files with 1846 additions and 1712 deletions

View File

@@ -1,6 +1,6 @@
# -*- mode:python -*-
# Copyright (c) 2020 ARM Limited
# Copyright (c) 2020-2021 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
@@ -80,10 +80,15 @@ class ArmMMU(BaseMMU):
stage2_dtb_walker = Param.ArmTableWalker(
ArmStage2TableWalker(), "HW Table walker")
sys = Param.System(Parent.any, "system object parameter")
@classmethod
def walkerPorts(cls):
return ["mmu.itb_walker.port", "mmu.dtb_walker.port"]
return ["mmu.itb_walker.port", "mmu.dtb_walker.port",
"mmu.stage2_itb_walker.port", "mmu.stage2_dtb_walker.port"]
def connectWalkerPorts(self, iport, dport):
self.itb_walker.port = iport
self.dtb_walker.port = dport
self.stage2_itb_walker.port = iport
self.stage2_dtb_walker.port = dport

View File

@@ -1146,7 +1146,7 @@ VldMultOp64::VldMultOp64(const char *mnem, ExtMachInst machInst,
microOps = new StaticInstPtr[numMicroops];
unsigned uopIdx = 0;
uint32_t memaccessFlags = (TLB::ArmFlags)eSize | TLB::AllowUnaligned;
uint32_t memaccessFlags = (MMU::ArmFlags)eSize | MMU::AllowUnaligned;
int i = 0;
for (; i < numMemMicroops - 1; ++i) {
@@ -1254,7 +1254,7 @@ VstMultOp64::VstMultOp64(const char *mnem, ExtMachInst machInst,
}
}
uint32_t memaccessFlags = (TLB::ArmFlags)eSize | TLB::AllowUnaligned;
uint32_t memaccessFlags = (MMU::ArmFlags)eSize | MMU::AllowUnaligned;
int i = 0;
for (; i < numMemMicroops - 1; ++i) {
@@ -1322,7 +1322,7 @@ VldSingleOp64::VldSingleOp64(const char *mnem, ExtMachInst machInst,
microOps = new StaticInstPtr[numMicroops];
unsigned uopIdx = 0;
uint32_t memaccessFlags = (TLB::ArmFlags)eSize | TLB::AllowUnaligned;
uint32_t memaccessFlags = (MMU::ArmFlags)eSize | MMU::AllowUnaligned;
int i = 0;
for (; i < numMemMicroops - 1; ++i) {
@@ -1401,7 +1401,7 @@ VstSingleOp64::VstSingleOp64(const char *mnem, ExtMachInst machInst,
numStructElems, index, i /* step */, replicate);
}
uint32_t memaccessFlags = (TLB::ArmFlags)eSize | TLB::AllowUnaligned;
uint32_t memaccessFlags = (MMU::ArmFlags)eSize | MMU::AllowUnaligned;
int i = 0;
for (; i < numMemMicroops - 1; ++i) {

View File

@@ -395,7 +395,7 @@ class MicroMemOp : public MicroIntImmOp
MicroMemOp(const char *mnem, ExtMachInst machInst, OpClass __opClass,
RegIndex _ura, RegIndex _urb, bool _up, uint8_t _imm)
: MicroIntImmOp(mnem, machInst, __opClass, _ura, _urb, _imm),
up(_up), memAccessFlags(TLB::AlignWord)
up(_up), memAccessFlags(MMU::AlignWord)
{
}
@@ -416,7 +416,7 @@ class MicroMemPairOp : public MicroOp
bool _up, uint8_t _imm)
: MicroOp(mnem, machInst, __opClass),
dest(_dreg1), dest2(_dreg2), urb(_base), up(_up), imm(_imm),
memAccessFlags(TLB::AlignWord)
memAccessFlags(MMU::AlignWord)
{
}

View File

@@ -78,7 +78,7 @@ Memory64::setExcAcRel(bool exclusive, bool acrel)
if (exclusive)
memAccessFlags |= Request::LLSC;
else
memAccessFlags |= ArmISA::TLB::AllowUnaligned;
memAccessFlags |= ArmISA::MMU::AllowUnaligned;
if (acrel) {
flags[IsWriteBarrier] = true;
flags[IsReadBarrier] = true;

View File

@@ -64,7 +64,7 @@ class SveMemVecFillSpill : public ArmStaticInst
IntRegIndex _base, uint64_t _imm)
: ArmStaticInst(mnem, _machInst, __opClass),
dest(_dest), base(_base), imm(_imm),
memAccessFlags(ArmISA::TLB::AllowUnaligned)
memAccessFlags(ArmISA::MMU::AllowUnaligned)
{
baseIsSP = isSP(_base);
}
@@ -90,7 +90,7 @@ class SveMemPredFillSpill : public ArmStaticInst
IntRegIndex _base, uint64_t _imm)
: ArmStaticInst(mnem, _machInst, __opClass),
dest(_dest), base(_base), imm(_imm),
memAccessFlags(ArmISA::TLB::AllowUnaligned)
memAccessFlags(ArmISA::MMU::AllowUnaligned)
{
baseIsSP = isSP(_base);
}
@@ -117,7 +117,7 @@ class SveContigMemSS : public ArmStaticInst
IntRegIndex _offset)
: ArmStaticInst(mnem, _machInst, __opClass),
dest(_dest), gp(_gp), base(_base), offset(_offset),
memAccessFlags(ArmISA::TLB::AllowUnaligned)
memAccessFlags(ArmISA::MMU::AllowUnaligned)
{
baseIsSP = isSP(_base);
}
@@ -144,7 +144,7 @@ class SveContigMemSI : public ArmStaticInst
uint64_t _imm)
: ArmStaticInst(mnem, _machInst, __opClass),
dest(_dest), gp(_gp), base(_base), imm(_imm),
memAccessFlags(ArmISA::TLB::AllowUnaligned)
memAccessFlags(ArmISA::MMU::AllowUnaligned)
{
baseIsSP = isSP(_base);
}

View File

@@ -2076,46 +2076,46 @@ ISA::setMiscReg(int misc_reg, RegVal val)
misc_reg = MISCREG_IFAR_S;
break;
case MISCREG_ATS1CPR:
addressTranslation(TLB::S1CTran, BaseMMU::Read, 0, val);
addressTranslation(MMU::S1CTran, BaseMMU::Read, 0, val);
return;
case MISCREG_ATS1CPW:
addressTranslation(TLB::S1CTran, BaseMMU::Write, 0, val);
addressTranslation(MMU::S1CTran, BaseMMU::Write, 0, val);
return;
case MISCREG_ATS1CUR:
addressTranslation(TLB::S1CTran, BaseMMU::Read,
TLB::UserMode, val);
addressTranslation(MMU::S1CTran, BaseMMU::Read,
MMU::UserMode, val);
return;
case MISCREG_ATS1CUW:
addressTranslation(TLB::S1CTran, BaseMMU::Write,
TLB::UserMode, val);
addressTranslation(MMU::S1CTran, BaseMMU::Write,
MMU::UserMode, val);
return;
case MISCREG_ATS12NSOPR:
if (!haveSecurity)
panic("Security Extensions required for ATS12NSOPR");
addressTranslation(TLB::S1S2NsTran, BaseMMU::Read, 0, val);
addressTranslation(MMU::S1S2NsTran, BaseMMU::Read, 0, val);
return;
case MISCREG_ATS12NSOPW:
if (!haveSecurity)
panic("Security Extensions required for ATS12NSOPW");
addressTranslation(TLB::S1S2NsTran, BaseMMU::Write, 0, val);
addressTranslation(MMU::S1S2NsTran, BaseMMU::Write, 0, val);
return;
case MISCREG_ATS12NSOUR:
if (!haveSecurity)
panic("Security Extensions required for ATS12NSOUR");
addressTranslation(TLB::S1S2NsTran, BaseMMU::Read,
TLB::UserMode, val);
addressTranslation(MMU::S1S2NsTran, BaseMMU::Read,
MMU::UserMode, val);
return;
case MISCREG_ATS12NSOUW:
if (!haveSecurity)
panic("Security Extensions required for ATS12NSOUW");
addressTranslation(TLB::S1S2NsTran, BaseMMU::Write,
TLB::UserMode, val);
addressTranslation(MMU::S1S2NsTran, BaseMMU::Write,
MMU::UserMode, val);
return;
case MISCREG_ATS1HR:
addressTranslation(TLB::HypMode, BaseMMU::Read, 0, val);
addressTranslation(MMU::HypMode, BaseMMU::Read, 0, val);
return;
case MISCREG_ATS1HW:
addressTranslation(TLB::HypMode, BaseMMU::Write, 0, val);
addressTranslation(MMU::HypMode, BaseMMU::Write, 0, val);
return;
case MISCREG_TTBCR:
{
@@ -2252,44 +2252,44 @@ ISA::setMiscReg(int misc_reg, RegVal val)
}
break;
case MISCREG_AT_S1E1R_Xt:
addressTranslation64(TLB::S1E1Tran, BaseMMU::Read, 0, val);
addressTranslation64(MMU::S1E1Tran, BaseMMU::Read, 0, val);
return;
case MISCREG_AT_S1E1W_Xt:
addressTranslation64(TLB::S1E1Tran, BaseMMU::Write, 0, val);
addressTranslation64(MMU::S1E1Tran, BaseMMU::Write, 0, val);
return;
case MISCREG_AT_S1E0R_Xt:
addressTranslation64(TLB::S1E0Tran, BaseMMU::Read,
TLB::UserMode, val);
addressTranslation64(MMU::S1E0Tran, BaseMMU::Read,
MMU::UserMode, val);
return;
case MISCREG_AT_S1E0W_Xt:
addressTranslation64(TLB::S1E0Tran, BaseMMU::Write,
TLB::UserMode, val);
addressTranslation64(MMU::S1E0Tran, BaseMMU::Write,
MMU::UserMode, val);
return;
case MISCREG_AT_S1E2R_Xt:
addressTranslation64(TLB::S1E2Tran, BaseMMU::Read, 0, val);
addressTranslation64(MMU::S1E2Tran, BaseMMU::Read, 0, val);
return;
case MISCREG_AT_S1E2W_Xt:
addressTranslation64(TLB::S1E2Tran, BaseMMU::Write, 0, val);
addressTranslation64(MMU::S1E2Tran, BaseMMU::Write, 0, val);
return;
case MISCREG_AT_S12E1R_Xt:
addressTranslation64(TLB::S12E1Tran, BaseMMU::Read, 0, val);
addressTranslation64(MMU::S12E1Tran, BaseMMU::Read, 0, val);
return;
case MISCREG_AT_S12E1W_Xt:
addressTranslation64(TLB::S12E1Tran, BaseMMU::Write, 0, val);
addressTranslation64(MMU::S12E1Tran, BaseMMU::Write, 0, val);
return;
case MISCREG_AT_S12E0R_Xt:
addressTranslation64(TLB::S12E0Tran, BaseMMU::Read,
TLB::UserMode, val);
addressTranslation64(MMU::S12E0Tran, BaseMMU::Read,
MMU::UserMode, val);
return;
case MISCREG_AT_S12E0W_Xt:
addressTranslation64(TLB::S12E0Tran, BaseMMU::Write,
TLB::UserMode, val);
addressTranslation64(MMU::S12E0Tran, BaseMMU::Write,
MMU::UserMode, val);
return;
case MISCREG_AT_S1E3R_Xt:
addressTranslation64(TLB::S1E3Tran, BaseMMU::Read, 0, val);
addressTranslation64(MMU::S1E3Tran, BaseMMU::Read, 0, val);
return;
case MISCREG_AT_S1E3W_Xt:
addressTranslation64(TLB::S1E3Tran, BaseMMU::Write, 0, val);
addressTranslation64(MMU::S1E3Tran, BaseMMU::Write, 0, val);
return;
case MISCREG_SPSR_EL3:
case MISCREG_SPSR_EL2:
@@ -2415,7 +2415,7 @@ ISA::unserialize(CheckpointIn &cp)
}
void
ISA::addressTranslation64(TLB::ArmTranslationType tran_type,
ISA::addressTranslation64(MMU::ArmTranslationType tran_type,
BaseMMU::Mode mode, Request::Flags flags, RegVal val)
{
// If we're in timing mode then doing the translation in
@@ -2466,7 +2466,7 @@ ISA::addressTranslation64(TLB::ArmTranslationType tran_type,
}
void
ISA::addressTranslation(TLB::ArmTranslationType tran_type,
ISA::addressTranslation(MMU::ArmTranslationType tran_type,
BaseMMU::Mode mode, Request::Flags flags, RegVal val)
{
// If we're in timing mode then doing the translation in
@@ -2491,8 +2491,8 @@ ISA::addressTranslation(TLB::ArmTranslationType tran_type,
HCR hcr = readMiscRegNoEffect(MISCREG_HCR);
uint8_t max_paddr_bit = 0;
if (haveLPAE && (ttbcr.eae || tran_type & TLB::HypMode ||
((tran_type & TLB::S1S2NsTran) && hcr.vm) )) {
if (haveLPAE && (ttbcr.eae || tran_type & MMU::HypMode ||
((tran_type & MMU::S1S2NsTran) && hcr.vm) )) {
max_paddr_bit = 39;
} else {

View File

@@ -42,11 +42,11 @@
#define __ARCH_ARM_ISA_HH__
#include "arch/arm/isa_device.hh"
#include "arch/arm/mmu.hh"
#include "arch/arm/regs/int.hh"
#include "arch/arm/regs/misc.hh"
#include "arch/arm/self_debug.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
#include "arch/arm/types.hh"
#include "arch/arm/utility.hh"
#include "arch/generic/isa.hh"
@@ -514,9 +514,9 @@ namespace ArmISA
void initID32(const ArmISAParams &p);
void initID64(const ArmISAParams &p);
void addressTranslation(TLB::ArmTranslationType tran_type,
void addressTranslation(MMU::ArmTranslationType tran_type,
BaseMMU::Mode mode, Request::Flags flags, RegVal val);
void addressTranslation64(TLB::ArmTranslationType tran_type,
void addressTranslation64(MMU::ArmTranslationType tran_type,
BaseMMU::Mode mode, Request::Flags flags, RegVal val);
public:

View File

@@ -152,7 +152,7 @@ let {{
if (singleAll) {
size = bits(machInst, 7, 6);
bool t = bits(machInst, 5);
align = size | TLB::AllowUnaligned;
align = size | MMU::AllowUnaligned;
if (width == 1) {
regs = t ? 2 : 1;
inc = 1;
@@ -185,7 +185,7 @@ let {{
}
} else {
size = bits(machInst, 11, 10);
align = size | TLB::AllowUnaligned;
align = size | MMU::AllowUnaligned;
regs = width;
unsigned indexAlign = bits(machInst, 7, 4);
// If width is 1, inc is always 1. That's overridden later.
@@ -251,7 +251,7 @@ let {{
align = bits(machInst, 5, 4);
if (align == 0) {
// @align wasn't specified, so alignment can be turned off.
align = size | TLB::AllowUnaligned;
align = size | MMU::AllowUnaligned;
} else {
align = align + 2;
}

View File

@@ -83,7 +83,7 @@ let {{
# Add memory request flags where necessary
if self.user:
self.memFlags.append("ArmISA::TLB::UserMode")
self.memFlags.append("ArmISA::MMU::UserMode")
sz = self.size*2 if paired else self.size
self.memFlags.append("%d" % int(math.log(sz, 2)))

View File

@@ -186,16 +186,16 @@ let {{
for isTbh in (0, 1):
if isTbh:
eaCode = '''
unsigned memAccessFlags = ArmISA::TLB::AllowUnaligned |
ArmISA::TLB::AlignHalfWord;
unsigned memAccessFlags = ArmISA::MMU::AllowUnaligned |
ArmISA::MMU::AlignHalfWord;
EA = Op1 + Op2 * 2
'''
accCode = 'NPC = PC + 2 * (Mem_uh);\n'
mnem = "tbh"
else:
eaCode = '''
unsigned memAccessFlags = ArmISA::TLB::AllowUnaligned |
ArmISA::TLB::AlignByte;
unsigned memAccessFlags = ArmISA::MMU::AllowUnaligned |
ArmISA::MMU::AlignByte;
EA = Op1 + Op2
'''
accCode = 'NPC = PC + 2 * (Mem_ub)'

View File

@@ -92,7 +92,7 @@ let {{
super(RfeInst, self).__init__(mnem, post, add, writeback)
self.Name = "RFE_" + loadImmClassName(post, add, writeback, 8)
self.memFlags.append("ArmISA::TLB::AlignWord")
self.memFlags.append("ArmISA::MMU::AlignWord")
def emit(self):
offset = 0
@@ -163,7 +163,7 @@ let {{
# Add memory request flags where necessary
self.memFlags.append("%d" % int(math.log(self.size, 2)))
if self.user:
self.memFlags.append("ArmISA::TLB::UserMode")
self.memFlags.append("ArmISA::MMU::UserMode")
self.instFlags = []
if self.flavor == "dprefetch":
@@ -173,7 +173,7 @@ let {{
self.memFlags.append("Request::PREFETCH")
self.instFlags = ['IsInstPrefetch']
elif self.flavor == "normal":
self.memFlags.append("ArmISA::TLB::AllowUnaligned")
self.memFlags.append("ArmISA::MMU::AllowUnaligned")
if self.flavor in ("exclusive", "acex"):
self.memFlags.append("Request::LLSC")
@@ -249,9 +249,9 @@ let {{
# Add memory request flags where necessary
if self.flavor in ("exclusive", "acex"):
self.memFlags.append("Request::LLSC")
self.memFlags.append("ArmISA::TLB::AlignDoubleWord")
self.memFlags.append("ArmISA::MMU::AlignDoubleWord")
else:
self.memFlags.append("ArmISA::TLB::AlignWord")
self.memFlags.append("ArmISA::MMU::AlignWord")
# Disambiguate the class name for different flavors of loads
if self.flavor != "normal":

View File

@@ -88,7 +88,7 @@ let {{
if self.flavor not in ("acquire", "acex", "exclusive",
"acexp", "exp"):
self.memFlags.append("ArmISA::TLB::AllowUnaligned")
self.memFlags.append("ArmISA::MMU::AllowUnaligned")
if self.flavor in ("acquire", "acex", "acexp"):
self.instFlags.extend(["IsWriteBarrier", "IsReadBarrier"])
@@ -136,7 +136,7 @@ let {{
if self.user:
eaCode += " uint8_t userFlag = 0;\n"\
" if(isUnpriviledgeAccess(xc->tcBase()))\n"\
" userFlag = ArmISA::TLB::UserMode;"
" userFlag = ArmISA::MMU::UserMode;"
self.codeBlobs["ea_code"] = eaCode

View File

@@ -137,7 +137,7 @@ let {{
(newHeader,
newDecoder,
newExec) = self.fillTemplates(self.name, self.Name, codeBlobs,
["ArmISA::TLB::AlignWord"], [], 'SrsOp', wbDecl)
["ArmISA::MMU::AlignWord"], [], 'SrsOp', wbDecl)
header_output += newHeader
decoder_output += newDecoder
@@ -178,13 +178,13 @@ let {{
# Add memory request flags where necessary
self.memFlags.append("%d" % int(math.log(self.size, 2)))
if self.user:
self.memFlags.append("ArmISA::TLB::UserMode")
self.memFlags.append("ArmISA::MMU::UserMode")
if self.flavor in ("exclusive", "relex"):
self.instFlags.append("IsStoreConditional")
self.memFlags.append("Request::LLSC")
elif self.flavor != "fp":
self.memFlags.append("ArmISA::TLB::AllowUnaligned")
self.memFlags.append("ArmISA::MMU::AllowUnaligned")
if self.flavor in ("release", "relex"):
self.instFlags.extend(["IsWriteBarrier",
@@ -263,9 +263,9 @@ let {{
if self.flavor in ("exclusive", "relex"):
self.instFlags.append("IsStoreConditional")
self.memFlags.append("Request::LLSC")
self.memFlags.append("ArmISA::TLB::AlignDoubleWord")
self.memFlags.append("ArmISA::MMU::AlignDoubleWord")
else:
self.memFlags.append("ArmISA::TLB::AlignWord")
self.memFlags.append("ArmISA::MMU::AlignWord")
if self.flavor in ("release", "relex"):
self.instFlags.extend(["IsWriteBarrier",

View File

@@ -73,7 +73,7 @@ let {{
if self.flavor not in ("release", "relex", "exclusive",
"relexp", "exp"):
self.memFlags.append("ArmISA::TLB::AllowUnaligned")
self.memFlags.append("ArmISA::MMU::AllowUnaligned")
if self.micro:
self.instFlags.append("IsMicroop")
@@ -137,7 +137,7 @@ let {{
if self.user:
eaCode += " uint8_t userFlag = 0;\n"\
" if(isUnpriviledgeAccess(xc->tcBase()))\n"\
" userFlag = ArmISA::TLB::UserMode;"
" userFlag = ArmISA::MMU::UserMode;"
self.codeBlobs["ea_code"] = eaCode

View File

@@ -438,7 +438,7 @@ def template SveIndexedMemVIMicroopDeclare {{
dest(_dest), gp(_gp), base(_base), imm(_imm),
elemIndex(_elemIndex), numElems(_numElems),
firstFault(_firstFault),
memAccessFlags(ArmISA::TLB::AllowUnaligned)
memAccessFlags(ArmISA::MMU::AllowUnaligned)
{
%(set_reg_idx_arr)s;
%(constructor)s;
@@ -526,7 +526,7 @@ def template SveIndexedMemSVMicroopDeclare {{
offsetIs32(_offsetIs32), offsetIsSigned(_offsetIsSigned),
offsetIsScaled(_offsetIsScaled), elemIndex(_elemIndex),
numElems(_numElems), firstFault(_firstFault),
memAccessFlags(ArmISA::TLB::AllowUnaligned)
memAccessFlags(ArmISA::MMU::AllowUnaligned)
{
%(set_reg_idx_arr)s;
%(constructor)s;
@@ -909,7 +909,7 @@ def template SveStructMemSIMicroopDeclare {{
%(base_class)s(mnem, machInst, %(op_class)s),
dest(_dest), gp(_gp), base(_base), imm(_imm),
numRegs(_numRegs), regIndex(_regIndex),
memAccessFlags(ArmISA::TLB::AllowUnaligned)
memAccessFlags(ArmISA::MMU::AllowUnaligned)
{
%(set_reg_idx_arr)s;
%(constructor)s;
@@ -1186,7 +1186,7 @@ def template SveStructMemSSMicroopDeclare {{
%(base_class)s(mnem, machInst, %(op_class)s),
dest(_dest), gp(_gp), base(_base), offset(_offset),
numRegs(_numRegs), regIndex(_regIndex),
memAccessFlags(ArmISA::TLB::AllowUnaligned)
memAccessFlags(ArmISA::MMU::AllowUnaligned)
{
%(set_reg_idx_arr)s;
%(constructor)s;

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020 ARM Limited
* Copyright (c) 2010-2013, 2016, 2019-2021 Arm Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -11,6 +11,9 @@
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2001-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
@@ -38,7 +41,6 @@
#ifndef __ARCH_ARM_MMU_HH__
#define __ARCH_ARM_MMU_HH__
#include "arch/arm/table_walker.hh"
#include "arch/arm/tlb.hh"
#include "arch/generic/mmu.hh"
@@ -49,6 +51,8 @@ namespace gem5
namespace ArmISA {
class TableWalker;
class MMU : public BaseMMU
{
protected:
@@ -65,20 +69,54 @@ class MMU : public BaseMMU
}
TLB * getTlb(BaseMMU::Mode mode, bool stage2) const;
TableWalker * getTableWalker(BaseMMU::Mode mode, bool stage2) const;
protected:
TLB *itbStage2;
TLB *dtbStage2;
TableWalker::Port iport;
TableWalker::Port dport;
TableWalker *itbWalker;
TableWalker *dtbWalker;
TableWalker *itbStage2Walker;
TableWalker *dtbStage2Walker;
public:
enum ArmFlags
{
AlignmentMask = 0x7,
AlignByte = 0x0,
AlignHalfWord = 0x1,
AlignWord = 0x2,
AlignDoubleWord = 0x3,
AlignQuadWord = 0x4,
AlignOctWord = 0x5,
AllowUnaligned = 0x8,
// Priv code operating as if it wasn't
UserMode = 0x10
};
enum ArmTranslationType
{
NormalTran = 0,
S1CTran = 0x1,
HypMode = 0x2,
// Secure code operating as if it wasn't (required by some Address
// Translate operations)
S1S2NsTran = 0x4,
// Address translation instructions (eg AT S1E0R_Xt) need to be handled
// in special ways during translation because they could need to act
// like a different EL than the current EL. The following flags are
// for these instructions
S1E0Tran = 0x8,
S1E1Tran = 0x10,
S1E2Tran = 0x20,
S1E3Tran = 0x40,
S12E0Tran = 0x80,
S12E1Tran = 0x100
};
enum TLBType
{
I_TLBS = 0x01,
@@ -86,27 +124,131 @@ class MMU : public BaseMMU
ALL_TLBS = 0x11
};
struct CachedState {
explicit CachedState(MMU *_mmu, bool stage2)
: mmu(_mmu), isStage2(stage2)
{}
void updateMiscReg(ThreadContext *tc, ArmTranslationType tran_type);
/** Returns the current VMID
* (information stored in the VTTBR_EL2 register) */
vmid_t getVMID(ThreadContext *tc) const;
MMU *mmu;
bool isStage2 = false;
CPSR cpsr = 0;
bool aarch64 = false;
ExceptionLevel aarch64EL = EL0;
SCTLR sctlr = 0;
SCR scr = 0;
bool isPriv = false;
bool isSecure = false;
bool isHyp = false;
TTBCR ttbcr = 0;
uint16_t asid = 0;
vmid_t vmid = 0;
PRRR prrr = 0;
NMRR nmrr = 0;
HCR hcr = 0;
uint32_t dacr = 0;
bool miscRegValid = false;
ArmTranslationType curTranType = NormalTran;
// Indicates whether a stage 2 lookup is also required
bool stage2Req = false;
// Indicates whether a stage 2 lookup of the table descriptors is
// required. Certain address translation instructions will
// intercept the IPA but the table descriptors still need to be
// translated by the stage2.
bool stage2DescReq = false;
// Indicates whether all translation requests should
// be routed directly to the stage 2 TLB
bool directToStage2 = false;
};
MMU(const ArmMMUParams &p);
void init() override;
using BaseMMU::translateFunctional;
/**
* Do a functional lookup on the TLB (for debugging)
* and don't modify any internal state
* @param tc thread context to get the context id from
* @param vaddr virtual address to translate
* @param pa returned physical address
* @return if the translation was successful
*/
bool translateFunctional(ThreadContext *tc, Addr vaddr, Addr &paddr);
Fault translateFunctional(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode, TLB::ArmTranslationType tran_type);
BaseMMU::Mode mode) override;
/**
* Do a functional lookup on the TLB (for checker cpu) that
* behaves like a normal lookup without modifying any page table state.
*/
Fault translateFunctional(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode, ArmTranslationType tran_type);
Fault translateFunctional(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode, TLB::ArmTranslationType tran_type,
bool stage2);
BaseMMU::Mode mode, ArmTranslationType tran_type, bool stage2);
using BaseMMU::translateAtomic;
Fault
translateAtomic(const RequestPtr &req,
ThreadContext *tc, Mode mode) override
{
return translateAtomic(req, tc, mode, NormalTran);
}
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode, bool stage2);
BaseMMU::Mode mode, ArmTranslationType tran_type, bool stage2);
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode,
ArmTranslationType tran_type);
using BaseMMU::translateTiming;
void
translateTiming(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode) override
{
translateTiming(req, tc, translation, mode, NormalTran, false);
}
void translateTiming(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Translation *translation, BaseMMU::Mode mode, bool stage2);
void translateTiming(
const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode,
ArmTranslationType tran_type, bool stage2);
Fault translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode,
ArmTranslationType tran_type, Addr vaddr, bool long_desc_format,
CachedState &state);
Fault translateMmuOn(ThreadContext *tc, const RequestPtr &req, Mode mode,
Translation *translation, bool &delay, bool timing, bool functional,
Addr vaddr, ArmFault::TranMethod tranMethod,
CachedState &state);
Fault translateFs(const RequestPtr &req, ThreadContext *tc, Mode mode,
Translation *translation, bool &delay,
bool timing, ArmTranslationType tran_type, bool functional,
CachedState &state);
Fault translateSe(const RequestPtr &req, ThreadContext *tc, Mode mode,
Translation *translation, bool &delay, bool timing,
CachedState &state);
Fault translateComplete(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode, ArmTranslationType tran_type,
bool call_from_s2);
Fault translateComplete(const RequestPtr &req, ThreadContext *tc,
Translation *translation, Mode mode, ArmTranslationType tran_type,
bool call_from_s2, CachedState &state);
Fault finalizePhysical(
const RequestPtr &req,
ThreadContext *tc, Mode mode) const override;
void drainResume() override;
void takeOverFrom(BaseMMU *old_mmu) override;
void invalidateMiscReg(TLBType type = ALL_TLBS);
@@ -164,8 +306,98 @@ class MMU : public BaseMMU
uint64_t
getAttr() const
{
return getDTBPtr()->getAttr();
return _attr;
}
/** Accessor functions for memory attributes for last accessed TLB entry
*/
void
setAttr(uint64_t attr)
{
_attr = attr;
}
/**
* Determine the EL to use for the purpose of a translation given
* a specific translation type. If the translation type doesn't
* specify an EL, we use the current EL.
*/
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type);
public:
Fault getTE(TlbEntry **te, const RequestPtr &req,
ThreadContext *tc, Mode mode,
Translation *translation, bool timing, bool functional,
bool is_secure, ArmTranslationType tran_type,
bool stage2);
Fault getTE(TlbEntry **te, const RequestPtr &req,
ThreadContext *tc, Mode mode,
Translation *translation, bool timing, bool functional,
bool is_secure, ArmTranslationType tran_type,
CachedState &state);
Fault getResultTe(TlbEntry **te, const RequestPtr &req,
ThreadContext *tc, Mode mode,
Translation *translation, bool timing,
bool functional, TlbEntry *mergeTe,
CachedState &state);
Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
bool stage2);
Fault checkPermissions(TlbEntry *te, const RequestPtr &req, Mode mode,
CachedState &state);
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
ThreadContext *tc, bool stage2);
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
ThreadContext *tc, CachedState &state);
bool checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req,
Mode mode, const bool is_priv, CachedState &state);
public: /* Testing */
TlbTestInterface *test;
void setTestInterface(SimObject *ti);
Fault testTranslation(const RequestPtr &req, Mode mode,
TlbEntry::DomainType domain, CachedState &state);
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
TlbEntry::DomainType domain,
LookupLevel lookup_level, bool stage2);
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure, Mode mode,
TlbEntry::DomainType domain,
LookupLevel lookup_level, CachedState &state);
protected:
ContextID miscRegContext;
public:
CachedState s1State, s2State;
protected:
uint64_t _attr; // Memory attributes for last accessed TLB entry
// Cached copies of system-level properties
bool haveLPAE;
bool haveVirtualization;
bool haveLargeAsid64;
uint8_t physAddrRange;
AddrRange m5opRange;
CachedState& updateMiscReg(
ThreadContext *tc, ArmTranslationType tran_type,
bool stage2);
struct Stats : public statistics::Group
{
Stats(statistics::Group *parent);
// Access Stats
mutable statistics::Scalar alignFaults;
mutable statistics::Scalar prefetchFaults;
mutable statistics::Scalar domainFaults;
mutable statistics::Scalar permsFaults;
} stats;
};
template<typename T>

View File

@@ -55,10 +55,10 @@ using namespace ArmISA;
Fault
Stage2LookUp::getTe(ThreadContext *tc, TlbEntry *destTe)
{
fault = stage2Tlb->getTE(&stage2Te, req, tc, mode, this, timing,
functional, secure, tranType);
fault = mmu->getTE(&stage2Te, req, tc, mode, this, timing,
functional, secure, tranType, true);
// Call finish if we're done already
if ((fault != NoFault) || (stage2Te != NULL)) {
// Since we directly requested the table entry (which we need later on
@@ -67,19 +67,19 @@ Stage2LookUp::getTe(ThreadContext *tc, TlbEntry *destTe)
// entry is now in the TLB this should always hit the cache.
if (fault == NoFault) {
if (ELIs64(tc, EL2))
fault = stage2Tlb->checkPermissions64(stage2Te, req, mode, tc);
fault = mmu->checkPermissions64(stage2Te, req, mode, tc, true);
else
fault = stage2Tlb->checkPermissions(stage2Te, req, mode);
fault = mmu->checkPermissions(stage2Te, req, mode, true);
}
mergeTe(req, mode);
mergeTe(mode);
*destTe = stage1Te;
}
return fault;
}
void
Stage2LookUp::mergeTe(const RequestPtr &req, BaseMMU::Mode mode)
Stage2LookUp::mergeTe(BaseMMU::Mode mode)
{
// Check again that we haven't got a fault
if (fault == NoFault) {
@@ -169,8 +169,9 @@ Stage2LookUp::mergeTe(const RequestPtr &req, BaseMMU::Mode mode)
if (fault != NoFault) {
// If the second stage of translation generated a fault add the
// details of the original stage 1 virtual address
reinterpret_cast<ArmFault *>(fault.get())->annotate(ArmFault::OVA,
s1Req->getVaddr());
if (auto arm_fault = reinterpret_cast<ArmFault *>(fault.get())) {
arm_fault->annotate(ArmFault::OVA, s1Req->getVaddr());
}
}
complete = true;
}
@@ -182,13 +183,14 @@ Stage2LookUp::finish(const Fault &_fault, const RequestPtr &req,
fault = _fault;
// if we haven't got the table entry get it now
if ((fault == NoFault) && (stage2Te == NULL)) {
fault = stage2Tlb->getTE(&stage2Te, req, tc, mode, this,
timing, functional, secure, tranType);
// OLD_LOOK: stage2Tlb
fault = mmu->getTE(&stage2Te, req, tc, mode, this,
timing, functional, secure, tranType, true);
}
// Now we have the stage 2 table entry we need to merge it with the stage
// 1 entry we were given at the start
mergeTe(req, mode);
mergeTe(mode);
if (fault != NoFault) {
// Returning with a fault requires the original request
@@ -196,7 +198,10 @@ Stage2LookUp::finish(const Fault &_fault, const RequestPtr &req,
} else if (timing) {
// Now notify the original stage 1 translation that we finally have
// a result
stage1Tlb->translateComplete(s1Req, tc, transState, mode, tranType, true);
// tran_s1.callFromStage2 = true;
// OLD_LOOK: stage1Tlb
mmu->translateComplete(
s1Req, tc, transState, mode, tranType, true);
}
// if we have been asked to delete ourselfs do it now
if (selfDelete) {

View File

@@ -59,15 +59,14 @@ class TLB;
class Stage2LookUp : public BaseMMU::Translation
{
private:
TLB *stage1Tlb;
TLB *stage2Tlb;
MMU *mmu;
TlbEntry stage1Te;
RequestPtr s1Req;
BaseMMU::Translation *transState;
BaseMMU::Mode mode;
bool timing;
bool functional;
TLB::ArmTranslationType tranType;
MMU::ArmTranslationType tranType;
TlbEntry *stage2Te;
RequestPtr req;
Fault fault;
@@ -76,22 +75,22 @@ class Stage2LookUp : public BaseMMU::Translation
bool secure;
public:
Stage2LookUp(TLB *s1Tlb, TLB *s2Tlb, TlbEntry s1Te, const RequestPtr &_req,
BaseMMU::Translation *_transState, BaseMMU::Mode _mode, bool _timing,
bool _functional, bool _secure, TLB::ArmTranslationType _tranType) :
stage1Tlb(s1Tlb), stage2Tlb(s2Tlb), stage1Te(s1Te), s1Req(_req),
Stage2LookUp(MMU *_mmu, TlbEntry s1_te, const RequestPtr &_req,
MMU::Translation *_transState, BaseMMU::Mode _mode, bool _timing,
bool _functional, bool _secure, MMU::ArmTranslationType _tranType) :
mmu(_mmu), stage1Te(s1_te), s1Req(_req),
transState(_transState), mode(_mode), timing(_timing),
functional(_functional), tranType(_tranType), stage2Te(nullptr),
fault(NoFault), complete(false), selfDelete(false), secure(_secure)
{
req = std::make_shared<Request>();
req->setVirt(s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
req->setVirt(s1_te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
s1Req->getFlags(), s1Req->requestorId(), 0);
}
Fault getTe(ThreadContext *tc, TlbEntry *destTe);
void mergeTe(const RequestPtr &req, BaseMMU::Mode mode);
void mergeTe(BaseMMU::Mode mode);
void setSelfDelete() { selfDelete = true; }

View File

@@ -61,7 +61,7 @@ using namespace ArmISA;
TableWalker::TableWalker(const Params &p)
: ClockedObject(p),
requestorId(p.sys->getRequestorId(this)),
port(nullptr),
port(new Port(this, requestorId)),
isStage2(p.is_stage2), tlb(NULL),
currState(NULL), pending(false),
numSquashable(p.num_squash_per_cycle),
@@ -128,7 +128,7 @@ TableWalker::WalkerState::WalkerState() :
secureLookup(false), rwTable(false), userTable(false), xnTable(false),
pxnTable(false), hpd(false), stage2Req(false),
stage2Tran(nullptr), timing(false), functional(false),
mode(BaseMMU::Read), tranType(TLB::NormalTran), l2Desc(l1Desc),
mode(BaseMMU::Read), tranType(MMU::NormalTran), l2Desc(l1Desc),
delayed(false), tableWalker(nullptr)
{
}
@@ -280,9 +280,9 @@ TableWalker::drainResume()
Fault
TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
vmid_t _vmid, bool _isHyp, BaseMMU::Mode _mode,
BaseMMU::Translation *_trans, bool _timing, bool _functional,
bool secure, TLB::ArmTranslationType tranType,
vmid_t _vmid, bool _isHyp, MMU::Mode _mode,
MMU::Translation *_trans, bool _timing, bool _functional,
bool secure, MMU::ArmTranslationType tranType,
bool _stage2Req)
{
assert(!(_functional && _timing));
@@ -331,7 +331,7 @@ TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid,
currState->aarch64 = ELIs64(_tc, EL2);
} else {
currState->el =
TLB::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType);
currState->aarch64 =
ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el);
}
@@ -533,9 +533,9 @@ TableWalker::processWalkWrapper()
} else {
// translate the request now that we know it will work
stats.walkServiceTime.sample(curTick() - currState->startTime);
tlb->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode);
mmu->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode,
currState->tranType, isStage2);
}
// delete the current request
@@ -634,7 +634,7 @@ TableWalker::processWalk()
// Trickbox address check
Fault f;
f = testWalk(l1desc_addr, sizeof(uint32_t),
TlbEntry::DomainType::NoAccess, L1);
TlbEntry::DomainType::NoAccess, L1, isStage2);
if (f) {
DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
if (currState->timing) {
@@ -807,7 +807,8 @@ TableWalker::processWalkLPAE()
// Trickbox address check
Fault f = testWalk(desc_addr, sizeof(uint64_t),
TlbEntry::DomainType::NoAccess, start_lookup_level);
TlbEntry::DomainType::NoAccess, start_lookup_level,
isStage2);
if (f) {
DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
if (currState->timing) {
@@ -1197,7 +1198,7 @@ TableWalker::processWalkAArch64()
// Trickbox address check
Fault f = testWalk(desc_addr, sizeof(uint64_t),
TlbEntry::DomainType::NoAccess, start_lookup_level);
TlbEntry::DomainType::NoAccess, start_lookup_level, isStage2);
if (f) {
DPRINTF(TLB, "Trickbox check caused fault on %#x\n", currState->vaddr_tainted);
if (currState->timing) {
@@ -1742,7 +1743,8 @@ TableWalker::doL1Descriptor()
// Trickbox address check
currState->fault = testWalk(l2desc_addr, sizeof(uint32_t),
currState->l1Desc.domain(), L2);
currState->l1Desc.domain(), L2,
isStage2);
if (currState->fault) {
if (!currState->timing) {
@@ -1910,7 +1912,7 @@ TableWalker::doLongDescriptor()
// Trickbox address check
currState->fault = testWalk(
next_desc_addr, sizeof(uint64_t), TlbEntry::DomainType::Client,
toLookupLevel(currState->longDesc.lookupLevel +1));
toLookupLevel(currState->longDesc.lookupLevel +1), isStage2);
if (currState->fault) {
if (!currState->timing) {
@@ -2054,8 +2056,11 @@ TableWalker::doL1DescriptorWrapper()
// Don't finish the translation if a stage 2 look up is underway
stats.walkServiceTime.sample(curTick() - currState->startTime);
DPRINTF(PageTableWalker, "calling translateTiming again\n");
tlb->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode);
mmu->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode,
currState->tranType, isStage2);
stats.walksShortTerminatedAtLevel[0]++;
pending = false;
@@ -2095,8 +2100,11 @@ TableWalker::doL2DescriptorWrapper()
} else {
stats.walkServiceTime.sample(curTick() - currState->startTime);
DPRINTF(PageTableWalker, "calling translateTiming again\n");
tlb->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode);
mmu->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode,
currState->tranType, isStage2);
stats.walksShortTerminatedAtLevel[1]++;
}
@@ -2172,8 +2180,11 @@ TableWalker::doLongDescriptorWrapper(LookupLevel curr_lookup_level)
// No additional lookups required
DPRINTF(PageTableWalker, "calling translateTiming again\n");
stats.walkServiceTime.sample(curTick() - currState->startTime);
tlb->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode);
mmu->translateTiming(currState->req, currState->tc,
currState->transState, currState->mode,
currState->tranType, isStage2);
stats.walksLongTerminatedAtLevel[(unsigned) curr_lookup_level]++;
pending = false;
@@ -2221,13 +2232,16 @@ TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes,
if (isTiming) {
auto *tran = new
Stage2Walk(*this, data, event, currState->vaddr);
Stage2Walk(*this, data, event, currState->vaddr,
currState->mode, currState->tranType);
currState->stage2Tran = tran;
readDataTimed(currState->tc, descAddr, tran, numBytes, flags);
fault = tran->fault;
} else {
fault = readDataUntimed(currState->tc,
currState->vaddr, descAddr, data, numBytes, flags,
currState->mode,
currState->tranType,
currState->functional);
}
@@ -2380,10 +2394,10 @@ TableWalker::pendingChange()
Fault
TableWalker::testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
LookupLevel lookup_level)
LookupLevel lookup_level, bool stage2)
{
return tlb->testWalk(pa, size, currState->vaddr, currState->isSecure,
currState->mode, domain, lookup_level);
return mmu->testWalk(pa, size, currState->vaddr, currState->isSecure,
currState->mode, domain, lookup_level, stage2);
}
@@ -2410,7 +2424,8 @@ TableWalker::pageSizeNtoStatBin(uint8_t N)
Fault
TableWalker::readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
uint8_t *data, int num_bytes, Request::Flags flags, bool functional)
uint8_t *data, int num_bytes, Request::Flags flags, BaseMMU::Mode mode,
MMU::ArmTranslationType tran_type, bool functional)
{
Fault fault;
@@ -2418,11 +2433,13 @@ TableWalker::readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
auto req = std::make_shared<Request>();
req->setVirt(desc_addr, num_bytes, flags | Request::PT_WALK,
requestorId, 0);
if (functional) {
fault = mmu->translateFunctional(req, tc, BaseMMU::Read,
TLB::NormalTran, true);
tran_type, true);
} else {
fault = mmu->translateAtomic(req, tc, BaseMMU::Read, true);
fault = mmu->translateAtomic(req, tc, BaseMMU::Read,
tran_type, true);
}
// Now do the access.
@@ -2459,9 +2476,10 @@ TableWalker::readDataTimed(ThreadContext *tc, Addr desc_addr,
}
TableWalker::Stage2Walk::Stage2Walk(TableWalker &_parent,
uint8_t *_data, Event *_event, Addr vaddr)
uint8_t *_data, Event *_event, Addr vaddr, BaseMMU::Mode _mode,
MMU::ArmTranslationType tran_type)
: data(_data), numBytes(0), event(_event), parent(_parent),
oVAddr(vaddr), fault(NoFault)
oVAddr(vaddr), mode(_mode), tranType(tran_type), fault(NoFault)
{
req = std::make_shared<Request>();
}
@@ -2495,7 +2513,7 @@ TableWalker::Stage2Walk::finish(const Fault &_fault,
void
TableWalker::Stage2Walk::translateTiming(ThreadContext *tc)
{
parent.mmu->translateTiming(req, tc, this, BaseMMU::Read, true);
parent.mmu->translateTiming(req, tc, this, mode, tranType, true);
}
TableWalker::TableWalkerStats::TableWalkerStats(Stats::Group *parent)

View File

@@ -41,6 +41,7 @@
#include <list>
#include "arch/arm/faults.hh"
#include "arch/arm/mmu.hh"
#include "arch/arm/regs/misc.hh"
#include "arch/arm/system.hh"
#include "arch/arm/tlb.hh"
@@ -61,7 +62,6 @@ class ThreadContext;
namespace ArmISA {
class Translation;
class TLB;
class MMU;
class TableWalker : public ClockedObject
{
@@ -828,7 +828,7 @@ class TableWalker : public ClockedObject
BaseMMU::Mode mode;
/** The translation type that has been requested */
TLB::ArmTranslationType tranType;
MMU::ArmTranslationType tranType;
/** Short-format descriptors */
L1Descriptor l1Desc;
@@ -912,12 +912,15 @@ class TableWalker : public ClockedObject
Event *event;
TableWalker &parent;
Addr oVAddr;
BaseMMU::Mode mode;
MMU::ArmTranslationType tranType;
public:
Fault fault;
Stage2Walk(TableWalker &_parent, uint8_t *_data, Event *_event,
Addr vaddr);
Addr vaddr, BaseMMU::Mode mode,
MMU::ArmTranslationType tran_type);
void markDelayed() {}
@@ -937,6 +940,7 @@ class TableWalker : public ClockedObject
Fault readDataUntimed(ThreadContext *tc, Addr vaddr, Addr desc_addr,
uint8_t *data, int num_bytes, Request::Flags flags,
BaseMMU::Mode mode, MMU::ArmTranslationType tran_type,
bool functional);
void readDataTimed(ThreadContext *tc, Addr desc_addr,
Stage2Walk *translation, int num_bytes,
@@ -1033,11 +1037,10 @@ class TableWalker : public ClockedObject
uint16_t asid, vmid_t _vmid,
bool _isHyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans,
bool timing, bool functional, bool secure,
TLB::ArmTranslationType tranType, bool _stage2Req);
MMU::ArmTranslationType tranType, bool _stage2Req);
void setMmu(MMU *_mmu) { mmu = _mmu; }
void setTlb(TLB *_tlb) { tlb = _tlb; }
void setPort(Port *_port) { port = _port; }
TLB* getTlb() { return tlb; }
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
uint8_t texcb, bool s);
@@ -1101,7 +1104,7 @@ class TableWalker : public ClockedObject
static uint8_t pageSizeNtoStatBin(uint8_t N);
Fault testWalk(Addr pa, Addr size, TlbEntry::DomainType domain,
LookupLevel lookup_level);
LookupLevel lookup_level, bool stage2);
};
} // namespace ArmISA

File diff suppressed because it is too large Load Diff

View File

@@ -59,7 +59,6 @@ class ThreadContext;
namespace ArmISA {
class TableWalker;
class Stage2LookUp;
class TLB;
class TLBIALL;
@@ -108,67 +107,12 @@ class TlbTestInterface
class TLB : public BaseTLB
{
public:
enum ArmFlags
{
AlignmentMask = 0x7,
AlignByte = 0x0,
AlignHalfWord = 0x1,
AlignWord = 0x2,
AlignDoubleWord = 0x3,
AlignQuadWord = 0x4,
AlignOctWord = 0x5,
AllowUnaligned = 0x8,
// Priv code operating as if it wasn't
UserMode = 0x10
};
enum ArmTranslationType
{
NormalTran = 0,
S1CTran = 0x1,
HypMode = 0x2,
// Secure code operating as if it wasn't (required by some Address
// Translate operations)
S1S2NsTran = 0x4,
// Address translation instructions (eg AT S1E0R_Xt) need to be handled
// in special ways during translation because they could need to act
// like a different EL than the current EL. The following flags are
// for these instructions
S1E0Tran = 0x8,
S1E1Tran = 0x10,
S1E2Tran = 0x20,
S1E3Tran = 0x40,
S12E0Tran = 0x80,
S12E1Tran = 0x100
};
/**
* Determine the EL to use for the purpose of a translation given
* a specific translation type. If the translation type doesn't
* specify an EL, we use the current EL.
*/
static ExceptionLevel tranTypeEL(CPSR cpsr, ArmTranslationType type);
protected:
TlbEntry* table; // the Page Table
int size; // TLB Size
bool isStage2; // Indicates this TLB is part of the second stage MMU
bool stage2Req; // Indicates whether a stage 2 lookup is also required
// Indicates whether a stage 2 lookup of the table descriptors is required.
// Certain address translation instructions will intercept the IPA but the
// table descriptors still need to be translated by the stage2.
bool stage2DescReq;
uint64_t _attr; // Memory attributes for last accessed TLB entry
bool directToStage2; // Indicates whether all translation requests should
// be routed directly to the stage 2 TLB
TableWalker *tableWalker;
TLB *stage2Tlb;
TlbTestInterface *test;
struct TlbStats : public statistics::Group
{
@@ -186,10 +130,6 @@ class TLB : public BaseTLB
mutable statistics::Scalar flushTlbMvaAsid;
mutable statistics::Scalar flushTlbAsid;
mutable statistics::Scalar flushedEntries;
mutable statistics::Scalar alignFaults;
mutable statistics::Scalar prefetchFaults;
mutable statistics::Scalar domainFaults;
mutable statistics::Scalar permsFaults;
statistics::Formula readAccesses;
statistics::Formula writeAccesses;
@@ -203,6 +143,7 @@ class TLB : public BaseTLB
probing::PMUUPtr ppRefills;
int rangeMRU; //On lookup, only move entries ahead when outside rangeMRU
vmid_t vmid;
public:
using Params = ArmTLBParams;
@@ -231,36 +172,16 @@ class TLB : public BaseTLB
void takeOverFrom(BaseTLB *otlb) override;
void setTestInterface(SimObject *ti);
void setStage2Tlb(TLB *stage2_tlb) { stage2Tlb = stage2_tlb; }
void setTableWalker(TableWalker *table_walker);
TableWalker *getTableWalker() { return tableWalker; }
int getsize() const { return size; }
void setVMID(vmid_t _vmid) { vmid = _vmid; }
void insert(Addr vaddr, TlbEntry &pte);
Fault getTE(TlbEntry **te, const RequestPtr &req,
ThreadContext *tc, BaseMMU::Mode mode,
BaseMMU::Translation *translation,
bool timing, bool functional,
bool is_secure, ArmTranslationType tranType);
Fault getResultTe(TlbEntry **te, const RequestPtr &req,
ThreadContext *tc, BaseMMU::Mode mode,
BaseMMU::Translation *translation, bool timing,
bool functional, TlbEntry *mergeTe);
Fault checkPermissions(TlbEntry *te, const RequestPtr &req,
BaseMMU::Mode mode);
Fault checkPermissions64(TlbEntry *te, const RequestPtr &req,
BaseMMU::Mode mode, ThreadContext *tc);
bool checkPAN(ThreadContext *tc, uint8_t ap, const RequestPtr &req,
BaseMMU::Mode mode, const bool is_priv);
/** Reset the entire TLB. Used for CPU switching to prevent stale
* translations after multiple switches
*/
@@ -314,87 +235,27 @@ class TLB : public BaseTLB
panic("demapPage() is not implemented.\n");
}
/**
* Do a functional lookup on the TLB (for debugging)
* and don't modify any internal state
* @param tc thread context to get the context id from
* @param vaddr virtual address to translate
* @param pa returned physical address
* @return if the translation was successful
*/
bool translateFunctional(ThreadContext *tc, Addr vaddr, Addr &paddr);
/**
* Do a functional lookup on the TLB (for checker cpu) that
* behaves like a normal lookup without modifying any page table state.
*/
Fault translateFunctional(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode, ArmTranslationType tranType);
Fault
translateFunctional(const RequestPtr &req,
ThreadContext *tc, BaseMMU::Mode mode) override
translateAtomic(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode) override
{
return translateFunctional(req, tc, mode, NormalTran);
panic("unimplemented");
}
/** Accessor functions for memory attributes for last accessed TLB entry
*/
void
setAttr(uint64_t attr)
{
_attr = attr;
}
uint64_t
getAttr() const
{
return _attr;
}
Fault translateMmuOff(ThreadContext *tc, const RequestPtr &req,
BaseMMU::Mode mode, TLB::ArmTranslationType tranType,
Addr vaddr, bool long_desc_format);
Fault translateMmuOn(ThreadContext *tc, const RequestPtr &req,
BaseMMU::Mode mode, BaseMMU::Translation *translation, bool &delay,
bool timing, bool functional,
Addr vaddr, ArmFault::TranMethod tranMethod);
Fault translateFs(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode, BaseMMU::Translation *translation,
bool &delay, bool timing, ArmTranslationType tranType,
bool functional = false);
Fault translateSe(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode, BaseMMU::Translation *translation,
bool &delay, bool timing);
Fault translateAtomic(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode, ArmTranslationType tranType);
Fault
translateAtomic(const RequestPtr &req,
ThreadContext *tc, BaseMMU::Mode mode) override
{
return translateAtomic(req, tc, mode, NormalTran);
}
void translateTiming(
const RequestPtr &req, ThreadContext *tc,
BaseMMU::Translation *translation, BaseMMU::Mode mode,
ArmTranslationType tranType);
void
translateTiming(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Translation *translation,
BaseMMU::Mode mode) override
{
translateTiming(req, tc, translation, mode, NormalTran);
panic("unimplemented");
}
Fault translateComplete(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Translation *translation, BaseMMU::Mode mode,
ArmTranslationType tranType, bool callFromS2);
Fault finalizePhysical(
const RequestPtr &req,
ThreadContext *tc, BaseMMU::Mode mode) const override;
void drainResume() override;
Fault
finalizePhysical(const RequestPtr &req, ThreadContext *tc,
BaseMMU::Mode mode) const override
{
panic("unimplemented");
}
void regProbePoints() override;
@@ -414,45 +275,8 @@ class TLB : public BaseTLB
// Writing to misc registers needs to invalidate them.
// translateFunctional/translateSe/translateFs checks if they are
// invalid and call updateMiscReg if necessary.
protected:
CPSR cpsr;
bool aarch64;
ExceptionLevel aarch64EL;
SCTLR sctlr;
SCR scr;
bool isPriv;
bool isSecure;
bool isHyp;
TTBCR ttbcr;
uint16_t asid;
vmid_t vmid;
PRRR prrr;
NMRR nmrr;
HCR hcr;
uint32_t dacr;
bool miscRegValid;
ContextID miscRegContext;
ArmTranslationType curTranType;
// Cached copies of system-level properties
bool haveLPAE;
bool haveVirtualization;
bool haveLargeAsid64;
uint8_t physAddrRange;
AddrRange m5opRange;
void updateMiscReg(ThreadContext *tc,
ArmTranslationType tranType = NormalTran);
/** Returns the current VMID
* (information stored in the VTTBR_EL2 register) */
vmid_t getVMID(ThreadContext *tc) const;
public:
void invalidateMiscReg() { miscRegValid = false; }
private:
private:
/** Remove any entries that match both a va and asn
* @param mva virtual address to flush
* @param asn contextid/asn to flush on match
@@ -463,14 +287,6 @@ private:
void _flushMva(Addr mva, uint64_t asn, bool secure_lookup,
bool ignore_asn, ExceptionLevel target_el,
bool in_host);
public: /* Testing */
Fault testTranslation(const RequestPtr &req, BaseMMU::Mode mode,
TlbEntry::DomainType domain);
Fault testWalk(Addr pa, Addr size, Addr va, bool is_secure,
BaseMMU::Mode mode, TlbEntry::DomainType domain,
LookupLevel lookup_level);
};
} // namespace ArmISA

View File

@@ -985,7 +985,7 @@ TarmacParserRecord::dump()
std::ostream &outs = Trace::output();
uint64_t written_data = 0;
unsigned mem_flags = 3 | ArmISA::TLB::AllowUnaligned;
unsigned mem_flags = 3 | ArmISA::MMU::AllowUnaligned;
ISetState isetstate;