arch-arm: Fixed error in choosing vector offset

The old code chose vector offset associated with exceptions taken
to EL3 by incorrectly using "from64", which is associated with the
exception level where the exception was taken from. However, the
offset should depend on the ISA of the lower EL and not on that of the
starting EL itself, as specified in the ARM ARM. This patch corrects
this by implementing the method in AArch64.TakeException in ARM ARM.

Change-Id: I8f7c9aa777c5f2eef9e2d89c36e9daee23f3a822
Reviewed-by: Giacomo Travaglini <giacomo.travaglini@arm.com>
Reviewed-on: https://gem5-review.googlesource.com/8001
Reviewed-by: Andreas Sandberg <andreas.sandberg@arm.com>
Maintainer: Andreas Sandberg <andreas.sandberg@arm.com>
This commit is contained in:
Chuan Zhu
2018-01-05 10:26:37 +00:00
committed by Giacomo Travaglini
parent ad36e61ce4
commit 73b1160bd8
2 changed files with 31 additions and 14 deletions

View File

@@ -350,7 +350,7 @@ ArmFault::getVector64(ThreadContext *tc)
panic("Invalid target exception level");
break;
}
return vbar + offset64();
return vbar + offset64(tc);
}
MiscRegIndex
@@ -654,6 +654,8 @@ ArmFault::invoke64(ThreadContext *tc, const StaticInstPtr &inst)
ret_addr += spsr.t ? thumbPcElrOffset() : armPcElrOffset();
tc->setMiscReg(elr_idx, ret_addr);
Addr vec_address = getVector64(tc);
// Update process state
OperatingMode64 mode = 0;
mode.spX = 1;
@@ -666,7 +668,7 @@ ArmFault::invoke64(ThreadContext *tc, const StaticInstPtr &inst)
tc->setMiscReg(MISCREG_CPSR, cpsr);
// Set PC to start of exception handler
Addr new_pc = purifyTaggedAddr(getVector64(tc), tc, toEL);
Addr new_pc = purifyTaggedAddr(vec_address, tc, toEL);
DPRINTF(Faults, "Invoking Fault (AArch64 target EL):%s cpsr:%#x PC:%#x "
"elr:%#x newVec: %#x\n", name(), cpsr, curr_pc, ret_addr, new_pc);
PCState pc(new_pc);
@@ -893,6 +895,31 @@ ArmFaultVals<T>::offset(ThreadContext *tc)
return isHypTrap ? 0x14 : vals.offset;
}
// Compute the vector offset for a fault taken to an AArch64 exception
// level (added to VBAR_ELx by getVector64()).  Implements the offset
// selection of AArch64.TakeException from the ARM ARM: for exceptions
// coming from a lower EL, the choice between the AArch32 and AArch64
// lower-EL vectors depends on the register width of that lower EL, not
// on the state of the EL the exception was taken from.
template<class T>
FaultOffset
ArmFaultVals<T>::offset64(ThreadContext *tc)
{
if (toEL == fromEL) {
// Exception stays at the current EL: use one of the "current EL"
// vectors.
// NOTE(review): selection is keyed on opModeIsT(fromMode); per the
// ARM ARM this choice should reflect stack-pointer selection
// (SP_EL0 vs SP_ELx) -- confirm currELTOffset/currELHOffset
// actually encode that distinction.
if (opModeIsT(fromMode))
return vals.currELTOffset;
return vals.currELHOffset;
} else {
// Exception is taken to a higher EL: the offset depends on whether
// the next lower EL is executing in AArch32 or AArch64.
bool lower_32 = false;
if (toEL == EL3) {
// When targeting EL3 the relevant lower EL is EL2 if EL2 is
// implemented and we are in non-secure state, otherwise EL1.
if (!inSecureState(tc) && ArmSystem::haveEL(tc, EL2))
lower_32 = ELIs32(tc, EL2);
else
lower_32 = ELIs32(tc, EL1);
} else {
// For EL1/EL2 targets the lower EL is simply toEL - 1.
lower_32 = ELIs32(tc, static_cast<ExceptionLevel>(toEL - 1));
}
if (lower_32)
return vals.lowerEL32Offset;
return vals.lowerEL64Offset;
}
}
// void
// SupervisorCall::setSyndrome64(ThreadContext *tc, MiscRegIndex esr_idx)
// {

View File

@@ -191,7 +191,7 @@ class ArmFault : public FaultBase
virtual void annotate(AnnotationIDs id, uint64_t val) {}
virtual FaultStat& countStat() = 0;
virtual FaultOffset offset(ThreadContext *tc) = 0;
virtual FaultOffset offset64() = 0;
virtual FaultOffset offset64(ThreadContext *tc) = 0;
virtual OperatingMode nextMode() = 0;
virtual bool routeToMonitor(ThreadContext *tc) const = 0;
virtual bool routeToHyp(ThreadContext *tc) const { return false; }
@@ -221,17 +221,7 @@ class ArmFaultVals : public ArmFault
FaultStat & countStat() override { return vals.count; }
FaultOffset offset(ThreadContext *tc) override;
FaultOffset offset64() override {
if (toEL == fromEL) {
if (opModeIsT(fromMode))
return vals.currELTOffset;
return vals.currELHOffset;
} else {
if (from64)
return vals.lowerEL64Offset;
return vals.lowerEL32Offset;
}
}
FaultOffset offset64(ThreadContext *tc) override;
OperatingMode nextMode() override { return vals.nextMode; }
virtual bool routeToMonitor(ThreadContext *tc) const override {