arch-riscv: Add flag for misaligned access check

Misaligned access is an optional feature of the RISC-V ISA, but it is
always enabled in the current gem5 model. We add a flag to the ISA class
to turn off the feature.

Note this CL only considers load/store instructions, not the
instruction fetch itself. To support instruction address faults, we'll
need to modify the RISC-V decoder.

Change-Id: Iec4cba0e4fdcb96ce400deb00cff47e56c6d1e93
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/63211
Maintainer: Gabe Black <gabeblack@google.com>
Tested-by: kokoro <noreply+kokoro@google.com>
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
Reviewed-by: Gabe Black <gabeblack@google.com>
This commit is contained in:
Jui-Min Lee
2022-09-06 17:01:48 +08:00
committed by Jui-min Lee
parent 17a46091fa
commit e1ba253438
7 changed files with 56 additions and 1 deletions

View File

@@ -38,6 +38,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import Param
from m5.objects.BaseISA import BaseISA
@@ -45,3 +46,7 @@ class RiscvISA(BaseISA):
type = "RiscvISA"
cxx_class = "gem5::RiscvISA::ISA"
cxx_header = "arch/riscv/isa.hh"
check_alignment = Param.Bool(
False, "whether to check memory access alignment"
)

View File

@@ -29,6 +29,7 @@
#include "arch/riscv/insts/static_inst.hh"
#include "arch/riscv/isa.hh"
#include "arch/riscv/pcstate.hh"
#include "arch/riscv/types.hh"
#include "cpu/static_inst.hh"
@@ -39,6 +40,19 @@ namespace gem5
namespace RiscvISA
{
bool
RiscvStaticInst::alignmentOk(ExecContext* xc, Addr addr, Addr size) const
{
    // A naturally aligned access is always acceptable.
    const bool aligned = (addr % size) == 0;
    if (!aligned) {
        // A misaligned access is still acceptable when the alignment
        // check is disabled. We test alignment before querying the ISA
        // because reaching the ISA object takes several indirections and
        // is comparatively slow.
        auto *isa = static_cast<ISA*>(xc->tcBase()->getIsaPtr());
        if (isa->alignmentCheckEnabled())
            return false;
    }
    return true;
}
void
RiscvMicroInst::advancePC(PCStateBase &pcState) const
{

View File

@@ -56,6 +56,8 @@ class RiscvStaticInst : public StaticInst
StaticInst(_mnemonic, __opClass), machInst(_machInst)
{}
bool alignmentOk(ExecContext* xc, Addr addr, Addr size) const;
public:
ExtMachInst machInst;

View File

@@ -203,7 +203,8 @@ RegClass ccRegClass(CCRegClass, CCRegClassName, 0, debug::IntRegs);
} // anonymous namespace
ISA::ISA(const Params &p) : BaseISA(p)
ISA::ISA(const Params &p) :
BaseISA(p), checkAlignment(p.check_alignment)
{
_regClasses.push_back(&intRegClass);
_regClasses.push_back(&floatRegClass);

View File

@@ -71,6 +71,7 @@ class ISA : public BaseISA
{
protected:
std::vector<RegVal> miscRegFile;
bool checkAlignment;
bool hpmCounterEnabled(int counter) const;
@@ -106,6 +107,8 @@ class ISA : public BaseISA
return CSRMasks;
}
bool alignmentCheckEnabled() const { return checkAlignment; }
bool inUserMode() const override;
void copyRegsFrom(ThreadContext *src) override;

View File

@@ -241,6 +241,9 @@ def template LoadReservedExecute {{
%(op_rd)s;
%(ea_code)s;
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, LOAD_ADDR_MISALIGNED);
}
{
Fault fault =
readMemAtomicLE(xc, traceData, EA, Mem, memAccessFlags);
@@ -268,6 +271,9 @@ def template StoreCondExecute {{
%(memacc_code)s;
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, STORE_ADDR_MISALIGNED);
}
{
Fault fault =
writeMemAtomicLE(xc, traceData, Mem, EA, memAccessFlags,
@@ -299,6 +305,9 @@ def template AtomicMemOpRMWExecute {{
assert(amo_op);
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, AMO_ADDR_MISALIGNED);
}
{
Fault fault =
amoMemAtomicLE(xc, traceData, Mem, EA, memAccessFlags, amo_op);
@@ -327,6 +336,9 @@ def template LoadReservedInitiateAcc {{
%(op_rd)s;
%(ea_code)s;
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, LOAD_ADDR_MISALIGNED);
}
return initiateMemRead(xc, traceData, EA, Mem, memAccessFlags);
}
}};
@@ -343,6 +355,9 @@ def template StoreCondInitiateAcc {{
%(ea_code)s;
%(memacc_code)s;
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, STORE_ADDR_MISALIGNED);
}
{
Fault fault = writeMemTimingLE(xc, traceData, Mem, EA,
memAccessFlags, nullptr);
@@ -370,6 +385,9 @@ def template AtomicMemOpRMWInitiateAcc {{
assert(amo_op);
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, AMO_ADDR_MISALIGNED);
}
return initiateMemAMO(xc, traceData, EA, Mem, memAccessFlags, amo_op);
}
}};

View File

@@ -106,6 +106,9 @@ def template LoadExecute {{
%(op_rd)s;
%(ea_code)s;
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, LOAD_ADDR_MISALIGNED);
}
{
Fault fault =
readMemAtomicLE(xc, traceData, EA, Mem, memAccessFlags);
@@ -132,6 +135,9 @@ def template LoadInitiateAcc {{
%(op_rd)s;
%(ea_code)s;
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, LOAD_ADDR_MISALIGNED);
}
return initiateMemRead(xc, traceData, EA, Mem, memAccessFlags);
}
}};
@@ -166,6 +172,9 @@ def template StoreExecute {{
%(memacc_code)s;
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, STORE_ADDR_MISALIGNED);
}
{
Fault fault =
writeMemAtomicLE(xc, traceData, Mem, EA, memAccessFlags,
@@ -194,6 +203,9 @@ def template StoreInitiateAcc {{
%(memacc_code)s;
if (!alignmentOk(xc, EA, sizeof(Mem))) {
return std::make_shared<AddressFault>(EA, STORE_ADDR_MISALIGNED);
}
{
Fault fault = writeMemTimingLE(xc, traceData, Mem, EA,
memAccessFlags, nullptr);