From dbae09e4d9fb377803cb48a1af8edff0cdaa92d1 Mon Sep 17 00:00:00 2001
From: Yu-Cheng Chang
Date: Tue, 19 Mar 2024 03:59:13 +0800
Subject: [PATCH] arch-riscv: Move alignment check to Physical Memory
 Attribute (PMA) (#914)

In the RISC-V unprivileged spec [1], support for misaligned loads and
stores depends on the EEI. In the RISC-V privileged spec v1.12 [2], the
PMAs specify whether misaligned accesses are supported for each data
width and memory region. Per the `mcause` description [3], we can raise
a misaligned-address exception directly if no memory region supports
misaligned accesses. If part of memory does support misaligned
accesses, we need to translate the `vaddr` to a `paddr` first and check
alignment on the `paddr` afterwards, so that a page fault or access
fault is raised before a misaligned fault.

The benefit of moving the check_alignment option from the ISA to the
PMA checker is that we can mark specific memory regions as supporting
misaligned loads and stores. The MMU checks alignment against the
virtual address if no such region is specified. If misaligned-capable
regions are configured, the address is translated first and alignment
is checked on the physical address at the end.

[1] https://github.com/riscv/riscv-isa-manual/blob/main/src/rv32.adoc#base-instruction-formats
[2] https://github.com/riscv/riscv-isa-manual/blob/main/src/machine.adoc#physical-memory-attributes
[3] https://github.com/riscv/riscv-isa-manual/blob/main/src/machine.adoc#machine-cause-register-mcause
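As a usage sketch (illustrative only, not part of this patch): a config
can opt specific regions into misaligned support. The snippet below
assumes the stock FS setup where the PMAChecker hangs off cpu.mmu, and
the address range is made up:

    from m5.objects import AddrRange, PMAChecker

    # Allow misaligned loads/stores only in the first 128 MiB of DRAM;
    # accesses outside this range still raise a misaligned-address
    # fault, now after translation instead of before it.
    cpu.mmu.pma_checker = PMAChecker(
        uncacheable=[],
        misaligned=[AddrRange(0x80000000, size="128MiB")],
    )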
---
 src/arch/riscv/PMAChecker.py        |  4 ++
 src/arch/riscv/RiscvISA.py          |  3 -
 src/arch/riscv/insts/static_inst.cc | 13 -----
 src/arch/riscv/insts/static_inst.hh |  2 -
 src/arch/riscv/isa.cc               |  3 +-
 src/arch/riscv/isa.hh               |  3 -
 src/arch/riscv/isa/formats/amo.isa  | 36 +++++-------
 src/arch/riscv/isa/formats/mem.isa  | 36 +++++++-----
 src/arch/riscv/mmu.hh               | 17 ++++++
 src/arch/riscv/pagetable_walker.cc  | 17 ++++--
 src/arch/riscv/pma_checker.cc       | 90 ++++++++++++++++++++++++++++-
 src/arch/riscv/pma_checker.hh       | 47 ++++++++++++++-
 src/arch/riscv/tlb.cc               | 27 +++++----
 13 files changed, 221 insertions(+), 77 deletions(-)

diff --git a/src/arch/riscv/PMAChecker.py b/src/arch/riscv/PMAChecker.py
index 154132611f..19b80b8fc2 100644
--- a/src/arch/riscv/PMAChecker.py
+++ b/src/arch/riscv/PMAChecker.py
@@ -53,3 +53,7 @@ class PMAChecker(BasePMAChecker):
     cxx_class = "gem5::RiscvISA::PMAChecker"
 
     uncacheable = VectorParam.AddrRange([], "Uncacheable address ranges")
+    misaligned = VectorParam.AddrRange(
+        [],
+        "Address ranges that support misaligned load/store to memory",
+    )
diff --git a/src/arch/riscv/RiscvISA.py b/src/arch/riscv/RiscvISA.py
index 0b0e8ce5a9..27b7e5d372 100644
--- a/src/arch/riscv/RiscvISA.py
+++ b/src/arch/riscv/RiscvISA.py
@@ -92,9 +92,6 @@ class RiscvISA(BaseISA):
     cxx_class = "gem5::RiscvISA::ISA"
     cxx_header = "arch/riscv/isa.hh"
 
-    check_alignment = Param.Bool(
-        True, "whether to check memory access alignment"
-    )
     riscv_type = Param.RiscvType("RV64", "RV32 or RV64")
 
     enable_rvv = Param.Bool(True, "Enable vector extension")
diff --git a/src/arch/riscv/insts/static_inst.cc b/src/arch/riscv/insts/static_inst.cc
index fc615c8d31..db814dacc5 100644
--- a/src/arch/riscv/insts/static_inst.cc
+++ b/src/arch/riscv/insts/static_inst.cc
@@ -40,19 +40,6 @@ namespace gem5
 namespace RiscvISA
 {
 
-bool
-RiscvStaticInst::alignmentOk(ExecContext* xc, Addr addr, Addr size) const
-{
-    if (addr % size == 0) {
-        return true;
-    }
-    // Even if it's not aligned, we're still fine if the check is not enabled.
-    // We perform the check first because detecting whether the check itself is
-    // enabled involves multiple indirect references and is quite slow.
-    auto *isa = static_cast<ISA *>(xc->tcBase()->getIsaPtr());
-    return !isa->alignmentCheckEnabled();
-}
-
 void
 RiscvMicroInst::advancePC(PCStateBase &pcState) const
 {
diff --git a/src/arch/riscv/insts/static_inst.hh b/src/arch/riscv/insts/static_inst.hh
index 2e4d94864a..8ccb9a7e65 100644
--- a/src/arch/riscv/insts/static_inst.hh
+++ b/src/arch/riscv/insts/static_inst.hh
@@ -57,8 +57,6 @@ class RiscvStaticInst : public StaticInst
         StaticInst(_mnemonic, __opClass), machInst(_machInst)
     {}
 
-    bool alignmentOk(ExecContext* xc, Addr addr, Addr size) const;
-
     template <typename T>
     T
     rvSelect(T v32, T v64) const
diff --git a/src/arch/riscv/isa.cc b/src/arch/riscv/isa.cc
index b27869eb20..1280a77b87 100644
--- a/src/arch/riscv/isa.cc
+++ b/src/arch/riscv/isa.cc
@@ -257,8 +257,7 @@ RegClass ccRegClass(CCRegClass, CCRegClassName, 0, debug::IntRegs);
 } // anonymous namespace
 
 ISA::ISA(const Params &p) : BaseISA(p, "riscv"),
-    _rvType(p.riscv_type), checkAlignment(p.check_alignment),
-    enableRvv(p.enable_rvv), vlen(p.vlen), elen(p.elen),
+    _rvType(p.riscv_type), enableRvv(p.enable_rvv), vlen(p.vlen), elen(p.elen),
     _privilegeModeSet(p.privilege_mode_set)
 {
     _regClasses.push_back(&intRegClass);
diff --git a/src/arch/riscv/isa.hh b/src/arch/riscv/isa.hh
index 9a24a76745..8622bf2338 100644
--- a/src/arch/riscv/isa.hh
+++ b/src/arch/riscv/isa.hh
@@ -74,7 +74,6 @@ class ISA : public BaseISA
   protected:
     RiscvType _rvType;
     std::vector<RegVal> miscRegFile;
-    bool checkAlignment;
     bool enableRvv;
 
     bool hpmCounterEnabled(int counter) const;
@@ -132,8 +131,6 @@ class ISA : public BaseISA
         return CSRMasks[_rvType][_privilegeModeSet];
     }
 
-    bool alignmentCheckEnabled() const { return checkAlignment; }
-
     bool inUserMode() const override;
     void copyRegsFrom(ThreadContext *src) override;
diff --git a/src/arch/riscv/isa/formats/amo.isa b/src/arch/riscv/isa/formats/amo.isa
index fe497536cc..1c385fb357 100644
--- a/src/arch/riscv/isa/formats/amo.isa
+++ b/src/arch/riscv/isa/formats/amo.isa
@@ -241,9 +241,6 @@ def template LoadReservedExecute {{
         %(op_rd)s;
         %(ea_code)s;
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, LOAD_ADDR_MISALIGNED);
-        }
         {
             Fault fault =
                 readMemAtomicLE(xc, traceData, EA, Mem, memAccessFlags);
@@ -271,9 +268,6 @@ def template StoreCondExecute {{
 
         %(memacc_code)s;
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, STORE_ADDR_MISALIGNED);
-        }
         {
             Fault fault =
                 writeMemAtomicLE(xc, traceData, Mem, EA, memAccessFlags,
@@ -305,9 +299,6 @@ def template AtomicMemOpRMWExecute {{
 
         assert(amo_op);
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, AMO_ADDR_MISALIGNED);
-        }
         {
             Fault fault = amoMemAtomicLE(xc, traceData, Mem, EA,
                                          memAccessFlags, amo_op);
@@ -336,9 +327,6 @@ def template LoadReservedInitiateAcc {{
         %(op_rd)s;
         %(ea_code)s;
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, LOAD_ADDR_MISALIGNED);
-        }
         return initiateMemRead(xc, traceData, EA, Mem, memAccessFlags);
     }
 }};
@@ -355,9 +343,6 @@ def template StoreCondInitiateAcc {{
         %(ea_code)s;
         %(memacc_code)s;
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, STORE_ADDR_MISALIGNED);
-        }
         {
             Fault fault = writeMemTimingLE(xc, traceData, Mem, EA,
                 memAccessFlags, nullptr);
@@ -385,9 +370,6 @@ def template AtomicMemOpRMWInitiateAcc {{
 
         assert(amo_op);
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, AMO_ADDR_MISALIGNED);
-        }
         return initiateMemAMO(xc, traceData, EA, Mem, memAccessFlags, amo_op);
     }
 }};
@@ -463,8 +445,12 @@ def format LoadReserved(memacc_code, postacc_code={{ }},
     iop = InstObjParams(name, Name, 'LoadReservedMicro',
         {'ea_code': ea_code, 'memacc_code': memacc_code,
          'postacc_code': postacc_code}, inst_flags)
+    mem_flags = ['Request::%s' % flag for flag in mem_flags]
+    align_flag = getAlignFlag(iop)
+    if align_flag:
+        mem_flags.append(align_flag)
     iop.constructor += '\n\tmemAccessFlags = memAccessFlags | ' + \
-        '|'.join(['Request::%s' % flag for flag in mem_flags]) + ';'
+        '|'.join(mem_flags) + ';'
     header_output += LRSCMicroDeclare.subst(iop)
     decoder_output += LRSCMicroConstructor.subst(iop)
@@ -490,8 +476,12 @@ def format StoreCond(memacc_code, postacc_code={{ }},
     iop = InstObjParams(name, Name, 'StoreCondMicro',
         {'ea_code': ea_code, 'memacc_code': memacc_code,
          'postacc_code': postacc_code}, inst_flags)
+    mem_flags = ['Request::%s' % flag for flag in mem_flags]
+    align_flag = getAlignFlag(iop)
+    if align_flag:
+        mem_flags.append(align_flag)
     iop.constructor += '\n\tmemAccessFlags = memAccessFlags | ' + \
-        '|'.join(['Request::%s' % flag for flag in mem_flags]) + ';'
+        '|'.join(mem_flags) + ';'
     header_output += LRSCMicroDeclare.subst(iop)
     decoder_output += LRSCMicroConstructor.subst(iop)
@@ -521,8 +511,12 @@ def format AtomicMemOp(memacc_code, amoop_code, postacc_code={{ }},
                             'amoop_code': amoop_code},
                             rmw_inst_flags)
 
+    rmw_mem_flags = ['Request::%s' % flag for flag in rmw_mem_flags]
+    align_flag = getAlignFlag(rmw_iop)
+    if align_flag:
+        rmw_mem_flags.append(align_flag)
     rmw_iop.constructor += '\n\tmemAccessFlags = memAccessFlags | ' + \
-        '|'.join(['Request::%s' % flag for flag in rmw_mem_flags]) + ';'
+        '|'.join(rmw_mem_flags) + ';'
     header_output += AtomicMemOpRMWDeclare.subst(rmw_iop)
     decoder_output += AtomicMemOpRMWConstructor.subst(rmw_iop)
diff --git a/src/arch/riscv/isa/formats/mem.isa b/src/arch/riscv/isa/formats/mem.isa
index 53de4af8b4..e5da80a06c 100644
--- a/src/arch/riscv/isa/formats/mem.isa
+++ b/src/arch/riscv/isa/formats/mem.isa
@@ -63,6 +63,25 @@ def template LoadStoreConstructor {{
 }};
 
 let {{
+def getAlignFlag(iop):
+    align_map = {
+        'uint8_t': 'MMU::ByteAlign',
+        'int8_t': 'MMU::ByteAlign',
+        'uint16_t': 'MMU::HalfWordAlign',
+        'int16_t': 'MMU::HalfWordAlign',
+        'uint32_t': 'MMU::WordAlign',
+        'int32_t': 'MMU::WordAlign',
+        'uint64_t': 'MMU::DoubleWordAlign',
+        'int64_t': 'MMU::DoubleWordAlign',
+    }
+    flag = ''
+    operands = iop.operands
+    if operands.bases.get('Mem'):
+        Mem = operands.bases['Mem']
+        flag = align_map.get(Mem.ctype)
+
+    return flag
+
 def LoadStoreBase(name, Name, offset_code, ea_code, memacc_code, mem_flags,
         inst_flags, base_class, postacc_code='', decode_template=BasicDecode,
         exec_template_base=''):
@@ -75,8 +94,11 @@ def LoadStoreBase(name, Name, offset_code, ea_code, memacc_code, mem_flags,
         'memacc_code': memacc_code, 'postacc_code': postacc_code },
         inst_flags)
 
+    mem_flags = [ 'Request::%s' % flag for flag in mem_flags ]
+    align_flag = getAlignFlag(iop)
+    if align_flag:
+        mem_flags.append(align_flag)
     if mem_flags:
-        mem_flags = [ 'Request::%s' % flag for flag in mem_flags ]
         s = '\n\tmemAccessFlags = ' + '|'.join(mem_flags) + ';'
         iop.constructor += s
@@ -106,9 +128,6 @@ def template LoadExecute {{
         %(op_rd)s;
         %(ea_code)s;
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, LOAD_ADDR_MISALIGNED);
-        }
         {
             Fault fault =
                 readMemAtomicLE(xc, traceData, EA, Mem, memAccessFlags);
@@ -135,9 +154,6 @@ def template LoadInitiateAcc {{
         %(op_rd)s;
         %(ea_code)s;
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, LOAD_ADDR_MISALIGNED);
-        }
         return initiateMemRead(xc, traceData, EA, Mem, memAccessFlags);
     }
 }};
@@ -172,9 +188,6 @@ def template StoreExecute {{
 
         %(memacc_code)s;
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, STORE_ADDR_MISALIGNED);
-        }
         {
             Fault fault =
                 writeMemAtomicLE(xc, traceData, Mem, EA, memAccessFlags,
@@ -203,9 +216,6 @@ def template StoreInitiateAcc {{
 
         %(memacc_code)s;
 
-        if (!alignmentOk(xc, EA, sizeof(Mem))) {
-            return std::make_shared<AddressFault>(EA, STORE_ADDR_MISALIGNED);
-        }
         {
             Fault fault = writeMemTimingLE(xc, traceData, Mem, EA,
                 memAccessFlags, nullptr);
diff --git a/src/arch/riscv/mmu.hh b/src/arch/riscv/mmu.hh
index b2e3f4a289..ebe7e23153 100644
--- a/src/arch/riscv/mmu.hh
+++ b/src/arch/riscv/mmu.hh
@@ -94,6 +94,23 @@ class MMU : public BaseMMU
     {
         return static_cast<TLB *>(dtb)->pmp;
     }
+
+    /*
+     * The usage of Memory Request Arch Flags for RISC-V
+     * | 7 ------------- 3 | 2 ------ 0 |
+     * |     Reserved      | LDST Size  |
+     * | ------------------| -----------|
+     */
+    enum RiscvFlags
+    {
+        ByteAlign = 0,
+        HalfWordAlign = 1,
+        WordAlign = 2,
+        DoubleWordAlign = 3,
+        QuadWordAlign = 4,
+
+        AlignmentMask = 0x7,
+    };
 };
 
 } // namespace RiscvISA
diff --git a/src/arch/riscv/pagetable_walker.cc b/src/arch/riscv/pagetable_walker.cc
index f6cdca05ec..f998a6445e 100644
--- a/src/arch/riscv/pagetable_walker.cc
+++ b/src/arch/riscv/pagetable_walker.cc
@@ -331,12 +331,15 @@ Walker::WalkerState::stepWalk(PacketPtr &write)
 
         // step 2:
         // Performing PMA/PMP checks on physical address of PTE
-        walker->pma->check(read->req);
         // Effective privilege mode for pmp checks for page table
        // walks is S mode according to specs
         fault = walker->pmp->pmpCheck(read->req, BaseMMU::Read,
                     RiscvISA::PrivilegeMode::PRV_S, tc, entry.vaddr);
 
+        if (fault == NoFault) {
+            fault = walker->pma->check(read->req, BaseMMU::Read, entry.vaddr);
+        }
+
         if (fault == NoFault) {
             // step 3:
             if (!pte.v || (!pte.r && pte.w)) {
@@ -383,11 +386,14 @@ Walker::WalkerState::stepWalk(PacketPtr &write)
 
                     // this read will eventually become write
                     // if doWrite is True
-                    walker->pma->check(read->req);
-
                     fault = walker->pmp->pmpCheck(read->req,
                                 BaseMMU::Write, pmode, tc, entry.vaddr);
 
+                    if (fault == NoFault) {
+                        fault = walker->pma->check(read->req,
+                                    BaseMMU::Write, entry.vaddr);
+                    }
+
                 }
                 // perform step 8 only if pmp checks pass
                 if (fault == NoFault) {
@@ -567,13 +573,16 @@ Walker::WalkerState::recvPacket(PacketPtr pkt)
             vaddr = Addr(sext<VADDR_BITS>(vaddr));
             Addr paddr = walker->tlb->translateWithTLB(vaddr, satp.asid, mode);
             req->setPaddr(paddr);
-            walker->pma->check(req);
 
             // do pmp check if any checking condition is met.
             // timingFault will be NoFault if pmp checks are
             // passed, otherwise an address fault will be returned.
             timingFault = walker->pmp->pmpCheck(req, mode, pmode, tc);
 
+            if (timingFault == NoFault) {
+                timingFault = walker->pma->check(req, mode);
+            }
+
             // Let the CPU continue.
             translation->finish(timingFault, req, tc, mode);
         } else {
diff --git a/src/arch/riscv/pma_checker.cc b/src/arch/riscv/pma_checker.cc
index e0ca80cb64..1655a7accd 100644
--- a/src/arch/riscv/pma_checker.cc
+++ b/src/arch/riscv/pma_checker.cc
@@ -37,6 +37,8 @@
 
 #include "arch/riscv/pma_checker.hh"
 
+#include "arch/riscv/faults.hh"
+#include "arch/riscv/mmu.hh"
 #include "base/addr_range.hh"
 #include "base/types.hh"
 #include "mem/packet.hh"
@@ -54,14 +56,41 @@ PMAChecker::PMAChecker(const Params &params) :
     BasePMAChecker(params),
     uncacheable(params.uncacheable.begin(), params.uncacheable.end())
 {
+    for (auto& range: params.misaligned) {
+        misaligned.insert(range, true);
+    }
 }
 
-void
-PMAChecker::check(const RequestPtr &req)
+Fault
+PMAChecker::check(const RequestPtr &req, BaseMMU::Mode mode, Addr vaddr)
 {
     if (isUncacheable(req->getPaddr(), req->getSize())) {
         req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
     }
+
+    return hasMisaligned() ? checkPAddrAlignment(req, mode, vaddr) : NoFault;
+}
+
+Fault
+PMAChecker::checkVAddrAlignment(
+    const RequestPtr &req, BaseMMU::Mode mode)
+{
+    // We need to translate the address before the alignment check
+    // if some memory ranges support misaligned loads/stores
+    if (hasMisaligned()) {
+        return NoFault;
+    }
+
+    // Ignore the alignment check for instruction fetching
+    if (mode == BaseMMU::Execute) {
+        return NoFault;
+    }
+    assert(req->hasVaddr());
+    Addr alignSize = mask(req->getArchFlags() & MMU::AlignmentMask) + 1;
+    if (addressAlign(req->getVaddr(), alignSize)) {
+        return NoFault;
+    }
+    return createMisalignFault(req->getVaddr(), mode);
 }
 
 bool
@@ -94,6 +123,63 @@ PMAChecker::takeOverFrom(BasePMAChecker *old)
     PMAChecker* derived_old = dynamic_cast<PMAChecker *>(old);
     assert(derived_old != nullptr);
     uncacheable = derived_old->uncacheable;
+    misaligned = derived_old->misaligned;
+}
+
+Fault
+PMAChecker::checkPAddrAlignment(
+    const RequestPtr &req, BaseMMU::Mode mode, Addr vaddr)
+{
+    Addr paddr = 0;
+    // Ignore the alignment check for instruction fetching
+    if (mode == BaseMMU::Execute) {
+        return NoFault;
+    }
+    assert(req->hasPaddr());
+    paddr = req->getPaddr();
+    Addr alignSize = mask(req->getArchFlags() & MMU::AlignmentMask) + 1;
+    if (addressAlign(paddr, alignSize)) {
+        return NoFault;
+    }
+    if (misalignedSupport(RangeSize(paddr, req->getSize()))) {
+        return NoFault;
+    }
+    return createMisalignFault(
+        (req->hasVaddr() ? req->getVaddr() : vaddr), mode);
+}
+
+Fault
+PMAChecker::createMisalignFault(Addr vaddr, BaseMMU::Mode mode)
+{
+    RiscvISA::ExceptionCode code;
+    switch (mode) {
+      case BaseMMU::Read:
+        code = ExceptionCode::LOAD_ADDR_MISALIGNED;
+        break;
+      case BaseMMU::Write:
+        code = ExceptionCode::STORE_ADDR_MISALIGNED;
+        break;
+      default:
+        panic("Execute mode request should not reach here.");
+    }
+    return std::make_shared<AddressFault>(vaddr, code);
+}
+
+bool
+PMAChecker::addressAlign(const Addr addr, const Addr size) {
+    return (addr & (size - 1)) == 0;
+}
+
+bool
+PMAChecker::misalignedSupport(const AddrRange &range)
+{
+    return misaligned.contains(range) != misaligned.end();
+}
+
+bool
+PMAChecker::hasMisaligned()
+{
+    return !misaligned.empty();
 }
 
 } // namespace RiscvISA
diff --git a/src/arch/riscv/pma_checker.hh b/src/arch/riscv/pma_checker.hh
index 20e64ae803..5cd4b0dda4 100644
--- a/src/arch/riscv/pma_checker.hh
+++ b/src/arch/riscv/pma_checker.hh
@@ -38,7 +38,9 @@
 #ifndef __ARCH_RISCV_PMA_CHECKER_HH__
 #define __ARCH_RISCV_PMA_CHECKER_HH__
 
+#include "arch/generic/mmu.hh"
 #include "base/addr_range.hh"
+#include "base/addr_range_map.hh"
 #include "base/types.hh"
 #include "mem/packet.hh"
 #include "params/BasePMAChecker.hh"
@@ -61,7 +63,10 @@ class BasePMAChecker : public SimObject
 {
   public:
     BasePMAChecker(const BasePMACheckerParams &params) : SimObject(params) {};
-    virtual void check(const RequestPtr &req) = 0;
+    virtual Fault check(
+        const RequestPtr &req, BaseMMU::Mode mode, Addr vaddr = 0) = 0;
+    virtual Fault checkVAddrAlignment(
+        const RequestPtr &req, BaseMMU::Mode mode) = 0;
     virtual void takeOverFrom(BasePMAChecker *old) = 0;
 };
 
@@ -84,15 +89,51 @@ class PMAChecker : public BasePMAChecker
     }
 
     PMAChecker(const Params &params);
-    AddrRangeList uncacheable;
+    /*
+     * Check for any exception at the given address
+     */
+    Fault check(
+        const RequestPtr &req, BaseMMU::Mode mode, Addr vaddr = 0) override;
 
-    void check(const RequestPtr &req) override;
+    /*
+     * Check alignment using the virtual address
+     */
+    Fault checkVAddrAlignment(
+        const RequestPtr &req, BaseMMU::Mode mode) override;
 
     bool isUncacheable(const AddrRange &range);
     bool isUncacheable(const Addr &addr, const unsigned size);
     bool isUncacheable(PacketPtr pkt);
 
     void takeOverFrom(BasePMAChecker *old) override;
+
+  protected:
+    /*
+     * Check alignment using the physical address
+     */
+    Fault checkPAddrAlignment(
+        const RequestPtr &req, BaseMMU::Mode mode, Addr vaddr);
+
+    /*
+     * Create an address-misaligned exception based on the MMU mode and
+     * the virtual address
+     */
+    Fault createMisalignFault(Addr vaddr, BaseMMU::Mode mode);
+
+    inline bool addressAlign(const Addr addr, const Addr size);
+
+    /*
+     * Check whether the address range supports misaligned loads/stores
+     */
+    inline bool misalignedSupport(const AddrRange &range);
+
+    /*
+     * Check whether any region supports misaligned loads/stores
+     */
+    inline bool hasMisaligned();
+
+    AddrRangeList uncacheable;
+    AddrRangeMap<bool> misaligned;
 };
 
 } // namespace RiscvISA
diff --git a/src/arch/riscv/tlb.cc b/src/arch/riscv/tlb.cc
index d1f5e5a6c1..dc9201d3e4 100644
--- a/src/arch/riscv/tlb.cc
+++ b/src/arch/riscv/tlb.cc
@@ -343,31 +343,36 @@ TLB::translate(const RequestPtr &req, ThreadContext *tc,
         PrivilegeMode pmode = getMemPriv(tc, mode);
         MISA misa = tc->readMiscRegNoEffect(MISCREG_ISA);
         SATP satp = tc->readMiscReg(MISCREG_SATP);
+        Fault fault = NoFault;
+
+        fault = pma->checkVAddrAlignment(req, mode);
+
         if (!misa.rvs || pmode == PrivilegeMode::PRV_M ||
             satp.mode == AddrXlateMode::BARE) {
             req->setFlags(Request::PHYSICAL);
         }
 
-        Fault fault;
-        if (req->getFlags() & Request::PHYSICAL) {
-            /**
-             * we simply set the virtual address to physical address
-             */
-            req->setPaddr(req->getVaddr());
-            fault = NoFault;
-        } else {
-            fault = doTranslate(req, tc, translation, mode, delayed);
+        if (fault == NoFault) {
+            if (req->getFlags() & Request::PHYSICAL) {
+                /**
+                 * we simply set the virtual address to physical address
+                 */
+                req->setPaddr(req->getVaddr());
+            } else {
+                fault = doTranslate(req, tc, translation, mode, delayed);
+            }
         }
 
         if (!delayed && fault == NoFault) {
-            pma->check(req);
-
             // do pmp check if any checking condition is met.
             // timingFault will be NoFault if pmp checks are
             // passed, otherwise an address fault will be returned.
             fault = pmp->pmpCheck(req, mode, pmode, tc);
         }
 
+        if (!delayed && fault == NoFault) {
+            fault = pma->check(req, mode);
+        }
         return fault;
     } else {
         // In the O3 CPU model, sometimes a memory access will be speculatively