diff --git a/src/arch/arm/mmu.cc b/src/arch/arm/mmu.cc index 8fe39204d3..604f2ee43b 100644 --- a/src/arch/arm/mmu.cc +++ b/src/arch/arm/mmu.cc @@ -178,10 +178,9 @@ MMU::translateFunctional(ThreadContext *tc, Addr va, Addr &pa) lookup_data.asn = state.asid; lookup_data.ignoreAsn = false; lookup_data.vmid = state.vmid; - lookup_data.hyp = state.isHyp; lookup_data.secure = state.isSecure; lookup_data.functional = true; - lookup_data.targetEL = state.aarch64 ? state.aarch64EL : EL1; + lookup_data.targetEL = state.aarch64EL; lookup_data.inHost = false; lookup_data.mode = BaseMMU::Read; @@ -839,9 +838,7 @@ MMU::translateMmuOff(ThreadContext *tc, const RequestPtr &req, Mode mode, bool dc = (HaveExt(tc, ArmExtension::FEAT_VHE) && state.hcr.e2h == 1 && state.hcr.tge == 1) ? 0: state.hcr.dc; bool i_cacheability = state.sctlr.i && !state.sctlr.m; - if (state.isStage2 || !dc || state.isSecure || - (state.isHyp && !(tran_type & S1CTran))) { - + if (state.isStage2 || !dc || state.aarch64EL == EL2) { temp_te.mtype = is_fetch ? TlbEntry::MemoryType::Normal : TlbEntry::MemoryType::StronglyOrdered; temp_te.innerAttrs = i_cacheability? 0x2: 0x0; @@ -1217,8 +1214,6 @@ MMU::CachedState::updateMiscReg(ThreadContext *tc, ELIs64(tc, EL2) : ELIs64(tc, aarch64EL == EL0 ? EL1 : aarch64EL); - isHyp = aarch64EL == EL2; - if (aarch64) { // AArch64 // determine EL we need to translate in switch (aarch64EL) { @@ -1288,7 +1283,6 @@ MMU::CachedState::updateMiscReg(ThreadContext *tc, if (hcr.e2h == 1 && (aarch64EL == EL2 || (hcr.tge ==1 && aarch64EL == EL0))) { - isHyp = true; directToStage2 = false; stage2Req = false; stage2DescReq = false; @@ -1296,18 +1290,17 @@ MMU::CachedState::updateMiscReg(ThreadContext *tc, // Work out if we should skip the first stage of translation and go // directly to stage 2. This value is cached so we don't have to // compute it for every translation. 
- bool sec = !isSecure || (isSecure && IsSecureEL2Enabled(tc)); + const bool el2_enabled = EL2Enabled(tc); stage2Req = isStage2 || - (vm && !isHyp && sec && - !(tran_type & S1CTran) && (aarch64EL < EL2) && - !(tran_type & S1E1Tran)); // <--- FIX THIS HACK - stage2DescReq = isStage2 || (vm && !isHyp && sec && - (aarch64EL < EL2)); + (vm && aarch64EL < EL2 && el2_enabled && + !(tran_type & S1CTran) && + !(tran_type & S1E1Tran)); // <--- FIX THIS HACK + stage2DescReq = isStage2 || + (vm && aarch64EL < EL2 && el2_enabled); directToStage2 = !isStage2 && stage2Req && !sctlr.m; } } else { vmid = 0; - isHyp = false; directToStage2 = false; stage2Req = false; stage2DescReq = false; @@ -1338,21 +1331,22 @@ MMU::CachedState::updateMiscReg(ThreadContext *tc, if (mmu->release()->has(ArmExtension::VIRTUALIZATION)) { vmid = bits(tc->readMiscReg(MISCREG_VTTBR), 55, 48); - if (isHyp) { + if (aarch64EL == EL2) { sctlr = tc->readMiscReg(MISCREG_HSCTLR); } // Work out if we should skip the first stage of translation and go // directly to stage 2. This value is cached so we don't have to // compute it for every translation. 
- bool sec = !isSecure || (isSecure && IsSecureEL2Enabled(tc)); - stage2Req = hcr.vm && !isStage2 && !isHyp && sec && - !(tran_type & S1CTran); - stage2DescReq = hcr.vm && !isStage2 && !isHyp && sec; - directToStage2 = stage2Req && !sctlr.m; + const bool el2_enabled = EL2Enabled(tc); + stage2Req = isStage2 || + (hcr.vm && aarch64EL < EL2 && el2_enabled && + !(tran_type & S1CTran)); + stage2DescReq = isStage2 || + (hcr.vm && aarch64EL < EL2 && el2_enabled); + directToStage2 = !isStage2 && stage2Req && !sctlr.m; } else { vmid = 0; stage2Req = false; - isHyp = false; directToStage2 = false; stage2DescReq = false; } @@ -1404,7 +1398,7 @@ MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, } TlbEntry* -MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, bool hyp, bool secure, +MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode) { @@ -1416,7 +1410,6 @@ MMU::lookup(Addr va, uint16_t asid, vmid_t vmid, bool hyp, bool secure, lookup_data.asn = asid; lookup_data.ignoreAsn = ignore_asn; lookup_data.vmid = vmid; - lookup_data.hyp = hyp; lookup_data.secure = secure; lookup_data.functional = functional; lookup_data.targetEL = target_el; @@ -1440,7 +1433,7 @@ MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Addr vaddr_tainted = req->getVaddr(); Addr vaddr = 0; - ExceptionLevel target_el = state.aarch64 ? 
state.aarch64EL : EL1; + ExceptionLevel target_el = state.aarch64EL; if (state.aarch64) { vaddr = purifyTaggedAddr(vaddr_tainted, tc, target_el, static_cast<TCR>(state.ttbcr), mode==Execute, state); @@ -1448,7 +1441,7 @@ MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, vaddr = vaddr_tainted; } - *te = lookup(vaddr, state.asid, state.vmid, state.isHyp, is_secure, false, + *te = lookup(vaddr, state.asid, state.vmid, is_secure, false, false, target_el, false, state.isStage2, mode); if (!isCompleteTranslation(*te)) { @@ -1469,7 +1462,7 @@ MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, Fault fault; fault = getTableWalker(mode, state.isStage2)->walk( - req, tc, state.asid, state.vmid, state.isHyp, mode, + req, tc, state.asid, state.vmid, mode, translation, timing, functional, is_secure, tran_type, state.stage2DescReq, *te); @@ -1478,7 +1471,7 @@ MMU::getTE(TlbEntry **te, const RequestPtr &req, ThreadContext *tc, Mode mode, return fault; } - *te = lookup(vaddr, state.asid, state.vmid, state.isHyp, is_secure, + *te = lookup(vaddr, state.asid, state.vmid, is_secure, true, false, target_el, false, state.isStage2, mode); assert(*te); } diff --git a/src/arch/arm/mmu.hh b/src/arch/arm/mmu.hh index de7dcea30c..9c59cd0026 100644 --- a/src/arch/arm/mmu.hh +++ b/src/arch/arm/mmu.hh @@ -149,7 +149,6 @@ class MMU : public BaseMMU scr = rhs.scr; isPriv = rhs.isPriv; isSecure = rhs.isSecure; - isHyp = rhs.isHyp; ttbcr = rhs.ttbcr; asid = rhs.asid; vmid = rhs.vmid; @@ -184,7 +183,6 @@ class MMU : public BaseMMU SCR scr = 0; bool isPriv = false; bool isSecure = false; - bool isHyp = false; TTBCR ttbcr = 0; uint16_t asid = 0; vmid_t vmid = 0; @@ -398,7 +396,6 @@ class MMU : public BaseMMU * @param asn context id/address space id to use * @param vmid The virtual machine ID used for stage 2 translation * @param secure if the lookup is secure - * @param hyp if the lookup is done from hyp mode * @param functional if the lookup should 
modify state * @param ignore_asn if on lookup asn should be ignored * @param target_el selecting the translation regime @@ -406,7 +403,7 @@ class MMU : public BaseMMU * @param mode to differentiate between read/writes/fetches. * @return pointer to TLB entry if it exists */ - TlbEntry *lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool hyp, + TlbEntry *lookup(Addr vpn, uint16_t asn, vmid_t vmid, bool secure, bool functional, bool ignore_asn, ExceptionLevel target_el, bool in_host, bool stage2, BaseMMU::Mode mode); diff --git a/src/arch/arm/pagetable.hh b/src/arch/arm/pagetable.hh index a1e9028e8f..45652c4a1d 100644 --- a/src/arch/arm/pagetable.hh +++ b/src/arch/arm/pagetable.hh @@ -198,8 +198,6 @@ struct TlbEntry : public Serializable bool ignoreAsn = false; // The virtual machine ID used for stage 2 translation vmid_t vmid = 0; - // if the lookup is done from hyp mode - bool hyp = false; // if the lookup is secure bool secure = false; // if the lookup should modify state @@ -238,7 +236,6 @@ struct TlbEntry : public Serializable // True if the long descriptor format is used for this entry (LPAE only) bool longDescFormat; // @todo use this in the update attribute bethod - bool isHyp; bool global; bool valid; @@ -273,7 +270,7 @@ struct TlbEntry : public Serializable asid(_asn), vmid(0), tg(Grain4KB), N(0), innerAttrs(0), outerAttrs(0), ap(read_only ? 
0x3 : 0), hap(0x3), domain(DomainType::Client), mtype(MemoryType::StronglyOrdered), - longDescFormat(false), isHyp(false), global(false), valid(true), + longDescFormat(false), global(false), valid(true), ns(true), nstid(true), el(EL0), type(TypeTLB::unified), partial(false), nonCacheable(uncacheable), @@ -291,7 +288,7 @@ struct TlbEntry : public Serializable asid(0), vmid(0), tg(ReservedGrain), N(0), innerAttrs(0), outerAttrs(0), ap(0), hap(0x3), domain(DomainType::Client), mtype(MemoryType::StronglyOrdered), - longDescFormat(false), isHyp(false), global(false), valid(false), + longDescFormat(false), global(false), valid(false), ns(true), nstid(true), el(EL0), type(TypeTLB::unified), partial(false), nonCacheable(false), shareable(false), outerShareable(false), xn(0), pxn(0) @@ -332,7 +329,7 @@ struct TlbEntry : public Serializable { bool match = false; if (valid && matchAddress(lookup) && - (lookup.secure == !nstid) && (lookup.hyp == isHyp)) + lookup.secure == !nstid) { match = checkELMatch(lookup.targetEL, lookup.inHost); @@ -424,7 +421,7 @@ struct TlbEntry : public Serializable { return csprintf("%#x, asn %d vmn %d hyp %d ppn %#x size: %#x ap:%d " "ns:%d nstid:%d g:%d el:%d", vpn << N, asid, vmid, - isHyp, pfn << N, size, ap, ns, nstid, global, el); + el == EL2, pfn << N, size, ap, ns, nstid, global, el); } void @@ -436,7 +433,6 @@ struct TlbEntry : public Serializable SERIALIZE_SCALAR(vpn); SERIALIZE_SCALAR(asid); SERIALIZE_SCALAR(vmid); - SERIALIZE_SCALAR(isHyp); SERIALIZE_SCALAR(N); SERIALIZE_SCALAR(global); SERIALIZE_SCALAR(valid); @@ -467,7 +463,6 @@ struct TlbEntry : public Serializable UNSERIALIZE_SCALAR(vpn); UNSERIALIZE_SCALAR(asid); UNSERIALIZE_SCALAR(vmid); - UNSERIALIZE_SCALAR(isHyp); UNSERIALIZE_SCALAR(N); UNSERIALIZE_SCALAR(global); UNSERIALIZE_SCALAR(valid); diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc index 5938755d86..261c0d6c60 100644 --- a/src/arch/arm/table_walker.cc +++ b/src/arch/arm/table_walker.cc @@ -124,7 
+124,7 @@ TableWalker::setMmu(MMU *_mmu) TableWalker::WalkerState::WalkerState() : tc(nullptr), aarch64(false), el(EL0), physAddrRange(0), req(nullptr), - asid(0), vmid(0), isHyp(false), transState(nullptr), + asid(0), vmid(0), transState(nullptr), vaddr(0), vaddr_tainted(0), sctlr(0), scr(0), cpsr(0), tcr(0), htcr(0), hcr(0), vtcr(0), @@ -288,7 +288,7 @@ TableWalker::drainResume() Fault TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid, - vmid_t _vmid, bool _isHyp, MMU::Mode _mode, + vmid_t _vmid, MMU::Mode _mode, MMU::Translation *_trans, bool _timing, bool _functional, bool secure, MMU::ArmTranslationType tranType, bool _stage2Req, const TlbEntry *walk_entry) @@ -339,7 +339,9 @@ TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid, currState->aarch64 = ELIs64(_tc, EL2); } else { currState->el = - MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), tranType); + MMU::tranTypeEL(_tc->readMiscReg(MISCREG_CPSR), + _tc->readMiscReg(MISCREG_SCR_EL3), + tranType); currState->aarch64 = ELIs64(_tc, currState->el == EL0 ? EL1 : currState->el); } @@ -353,7 +355,6 @@ TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid, currState->fault = NoFault; currState->asid = _asid; currState->vmid = _vmid; - currState->isHyp = _isHyp; currState->timing = _timing; currState->functional = _functional; currState->mode = _mode; @@ -429,7 +430,8 @@ TableWalker::walk(const RequestPtr &_req, ThreadContext *_tc, uint16_t _asid, currState->stage2Req = _stage2Req && !isStage2; - bool long_desc_format = currState->aarch64 || _isHyp || isStage2 || + bool hyp = currState->el == EL2; + bool long_desc_format = currState->aarch64 || hyp || isStage2 || longDescFormatInUse(currState->tc); if (long_desc_format) { @@ -492,7 +494,7 @@ TableWalker::processWalkWrapper() // Check if a previous walk filled this request already // @TODO Should this always be the TLB or should we look in the stage2 TLB? 
TlbEntry* te = mmu->lookup(currState->vaddr, currState->asid, - currState->vmid, currState->isHyp, currState->isSecure, true, false, + currState->vmid, currState->isSecure, true, false, currState->el, false, isStage2, currState->mode); // Check if we still need to have a walk for this request. If the requesting @@ -513,8 +515,9 @@ TableWalker::processWalkWrapper() Fault f; if (currState->aarch64) f = processWalkAArch64(); - else if (longDescFormatInUse(currState->tc) || - currState->isHyp || isStage2) + else if (bool hyp = currState->el == EL2; + longDescFormatInUse(currState->tc) || + hyp || isStage2) f = processWalkLPAE(); else f = processWalk(); @@ -563,7 +566,7 @@ TableWalker::processWalkWrapper() if (pendingQueue.size()) { currState = pendingQueue.front(); te = mmu->lookup(currState->vaddr, currState->asid, - currState->vmid, currState->isHyp, currState->isSecure, true, + currState->vmid, currState->isSecure, true, false, currState->el, false, isStage2, currState->mode); } else { // Terminate the loop, nothing more to do @@ -713,7 +716,7 @@ TableWalker::processWalkLPAE() start_lookup_level = currState->vtcr.sl0 ? 
LookupLevel::L1 : LookupLevel::L2; currState->isUncacheable = currState->vtcr.irgn0 == 0; - } else if (currState->isHyp) { + } else if (currState->el == EL2) { DPRINTF(TLB, " - Selecting HTTBR (long-desc.)\n"); ttbr = currState->tc->readMiscReg(MISCREG_HTTBR); tsz = currState->htcr.t0sz; @@ -2301,7 +2304,6 @@ TableWalker::insertPartialTableEntry(LongDescriptor &descriptor) // to differentiate translation contexts te.global = !mmu->hasUnprivRegime( currState->el, currState->hcr.e2h); - te.isHyp = currState->isHyp; te.asid = currState->asid; te.vmid = currState->vmid; te.N = descriptor.offsetBits(); @@ -2315,10 +2317,7 @@ TableWalker::insertPartialTableEntry(LongDescriptor &descriptor) te.nstid = !currState->isSecure; te.type = TypeTLB::unified; - if (currState->aarch64) - te.el = currState->el; - else - te.el = EL1; + te.el = currState->el; te.xn = currState->xnTable; te.pxn = currState->pxnTable; @@ -2330,8 +2329,8 @@ TableWalker::insertPartialTableEntry(LongDescriptor &descriptor) te.N, te.pfn, te.size, te.global, te.valid); DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d " "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn, - te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp, - te.nonCacheable, te.ns); + te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, + te.el == EL2, te.nonCacheable, te.ns); DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n", descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()), descriptor.getRawData()); @@ -2349,7 +2348,6 @@ TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor) // Create and fill a new page table entry te.valid = true; te.longDescFormat = long_descriptor; - te.isHyp = currState->isHyp; te.asid = currState->asid; te.vmid = currState->vmid; te.N = descriptor.offsetBits(); @@ -2364,10 +2362,7 @@ TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor) te.type = currState->mode == BaseMMU::Execute ? 
TypeTLB::instruction : TypeTLB::data; - if (currState->aarch64) - te.el = currState->el; - else - te.el = EL1; + te.el = currState->el; stats.pageSizes[pageSizeNtoStatBin(te.N)]++; stats.requestOrigin[COMPLETED][currState->isFetch]++; @@ -2405,8 +2400,8 @@ TableWalker::insertTableEntry(DescriptorBase &descriptor, bool long_descriptor) DPRINTF(TLB, " - N:%d pfn:%#x size:%#x global:%d valid:%d\n", te.N, te.pfn, te.size, te.global, te.valid); DPRINTF(TLB, " - vpn:%#x xn:%d pxn:%d ap:%d domain:%d asid:%d " - "vmid:%d hyp:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn, - te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.isHyp, + "vmid:%d nc:%d ns:%d\n", te.vpn, te.xn, te.pxn, + te.ap, static_cast<uint8_t>(te.domain), te.asid, te.vmid, te.nonCacheable, te.ns); DPRINTF(TLB, " - domain from L%d desc:%d data:%#x\n", descriptor.lookupLevel, static_cast<uint8_t>(descriptor.domain()), diff --git a/src/arch/arm/table_walker.hh b/src/arch/arm/table_walker.hh index b511fd44d0..66a276d661 100644 --- a/src/arch/arm/table_walker.hh +++ b/src/arch/arm/table_walker.hh @@ -822,7 +822,6 @@ class TableWalker : public ClockedObject /** ASID that we're servicing the request under */ uint16_t asid; vmid_t vmid; - bool isHyp; /** Translation state for delayed requests */ BaseMMU::Translation *transState; @@ -1105,7 +1104,7 @@ class TableWalker : public ClockedObject Fault walk(const RequestPtr &req, ThreadContext *tc, uint16_t asid, vmid_t _vmid, - bool hyp, BaseMMU::Mode mode, BaseMMU::Translation *_trans, + BaseMMU::Mode mode, BaseMMU::Translation *_trans, bool timing, bool functional, bool secure, MMU::ArmTranslationType tran_type, bool stage2, const TlbEntry *walk_entry); diff --git a/src/arch/arm/tlb.cc b/src/arch/arm/tlb.cc index e2979f5c7c..a594766d25 100644 --- a/src/arch/arm/tlb.cc +++ b/src/arch/arm/tlb.cc @@ -164,7 +164,7 @@ TLB::lookup(const Lookup &lookup_data) "ppn %#x size: %#x pa: %#x ap:%d ns:%d nstid:%d g:%d asid: %d " "el: %d\n", lookup_data.va, lookup_data.asn, retval ? 
"hit" : "miss", - lookup_data.vmid, lookup_data.hyp, lookup_data.secure, + lookup_data.vmid, lookup_data.targetEL == EL2, lookup_data.secure, retval ? retval->pfn : 0, retval ? retval->size : 0, retval ? retval->pAddr(lookup_data.va) : 0, retval ? retval->ap : 0, @@ -246,15 +246,15 @@ TLB::insert(TlbEntry &entry) entry.size, entry.vpn, entry.asid, entry.vmid, entry.N, entry.global, entry.valid, entry.nonCacheable, entry.xn, entry.ap, static_cast(entry.domain), entry.ns, entry.nstid, - entry.isHyp); + entry.el == EL2); if (table[size - 1].valid) DPRINTF(TLB, " - Replacing Valid entry %#x, asn %d vmn %d ppn %#x " - "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp:%d el: %d\n", + "size: %#x ap:%d ns:%d nstid:%d g:%d isHyp: %d el: %d\n", table[size-1].vpn << table[size-1].N, table[size-1].asid, table[size-1].vmid, table[size-1].pfn << table[size-1].N, table[size-1].size, table[size-1].ap, table[size-1].ns, - table[size-1].nstid, table[size-1].global, table[size-1].isHyp, + table[size-1].nstid, table[size-1].global, table[size-1].el == EL2, table[size-1].el); // inserting to MRU position and evicting the LRU one diff --git a/src/arch/arm/tlbi_op.cc b/src/arch/arm/tlbi_op.cc index b49139bf3e..1e945cf33a 100644 --- a/src/arch/arm/tlbi_op.cc +++ b/src/arch/arm/tlbi_op.cc @@ -205,7 +205,6 @@ bool TLBIALLN::match(TlbEntry* te, vmid_t vmid) const { return te->valid && te->nstid && - te->isHyp == (targetEL == EL2) && te->checkELMatch(targetEL, false); } @@ -216,7 +215,6 @@ TLBIMVAA::lookupGen(vmid_t vmid) const lookup_data.va = sext<56>(addr); lookup_data.ignoreAsn = true; lookup_data.vmid = vmid; - lookup_data.hyp = targetEL == EL2; lookup_data.secure = secureLookup; lookup_data.functional = true; lookup_data.targetEL = targetEL; @@ -254,7 +252,6 @@ TLBIMVA::lookupGen(vmid_t vmid) const lookup_data.asn = asid; lookup_data.ignoreAsn = false; lookup_data.vmid = vmid; - lookup_data.hyp = targetEL == EL2; lookup_data.secure = secureLookup; lookup_data.functional = true; 
lookup_data.targetEL = targetEL;