arch-riscv: Add risc-v vector ext v1.0 mem insts support
* TODOs: + Vector Segment Load/Store + Vector Fault-only-first Load Change-Id: I2815c76404e62babab7e9466e4ea33ea87e66e75 Co-authored-by: Yang Liu <numbksco@gmail.com> Co-authored-by: Fan Yang <1209202421@qq.com> Co-authored-by: Jerin Joy <joy@rivosinc.com>
This commit is contained in:
@@ -122,5 +122,178 @@ VConfOp::generateZimmDisassembly() const
|
||||
return s.str();
|
||||
}
|
||||
|
||||
std::string
VleMicroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Unit-stride load microop: "vleN.v vd, offset(rs1), <old-vd>[, v0.t]".
    // Each microop covers one vector register, hence the VLENB*microIdx
    // byte offset from the base address register.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(destRegIdx(0));
    ss << ", " << VLENB * microIdx;
    ss << '(' << registerName(srcRegIdx(0)) << ')';
    ss << ", " << registerName(srcRegIdx(1));
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
std::string
VlWholeMicroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Whole-register load microop: "vlNreE.v vd, offset(rs1)".
    // Whole-register ops are unmasked, so no v0.t suffix is ever printed.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(destRegIdx(0));
    ss << ", " << VLENB * microIdx;
    ss << '(' << registerName(srcRegIdx(0)) << ')';
    return ss.str();
}
|
||||
|
||||
std::string
VseMicroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Unit-stride store microop: "vseN.v vs3, offset(rs1)[, v0.t]".
    // srcRegIdx(1) is the data (vs3) register; srcRegIdx(0) is the base.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(srcRegIdx(1));
    ss << ", " << VLENB * microIdx;
    ss << '(' << registerName(srcRegIdx(0)) << ')';
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
std::string
VsWholeMicroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Whole-register store microop: "vsNr.v vs3, offset(rs1)".
    // Whole-register ops are unmasked, so no v0.t suffix is printed.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(srcRegIdx(1));
    ss << ", " << VLENB * microIdx;
    ss << '(' << registerName(srcRegIdx(0)) << ')';
    return ss.str();
}
|
||||
|
||||
std::string
VleMacroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Unit-stride load macroop: "vleN.v vd, (rs1)[, v0.t]".
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(destRegIdx(0));
    ss << ", " << '(' << registerName(srcRegIdx(0)) << ')';
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
std::string
VlWholeMacroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Whole-register load macroop: "vlNreE.v vd, (rs1)" (never masked).
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(destRegIdx(0));
    ss << ", " << '(' << registerName(srcRegIdx(0)) << ')';
    return ss.str();
}
|
||||
|
||||
std::string
VseMacroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Unit-stride store macroop: "vseN.v vs3, (rs1)[, v0.t]".
    // srcRegIdx(1) is the data (vs3) register; srcRegIdx(0) is the base.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(srcRegIdx(1));
    ss << ", " << '(' << registerName(srcRegIdx(0)) << ')';
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
std::string
VsWholeMacroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Whole-register store macroop: "vsNr.v vs3, (rs1)" (never masked).
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(srcRegIdx(1));
    ss << ", " << '(' << registerName(srcRegIdx(0)) << ')';
    return ss.str();
}
|
||||
|
||||
std::string
VlStrideMacroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Strided load macroop: "vlseN.v vd, (rs1), rs2[, v0.t]".
    // srcRegIdx(1) is the byte-stride scalar register.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(destRegIdx(0));
    ss << ", " << '(' << registerName(srcRegIdx(0)) << ')';
    ss << ", " << registerName(srcRegIdx(1));
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
std::string
VlStrideMicroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Strided load microop: "vlseN.v vd, (rs1), rs2[, vd-old][, v0.t]".
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(destRegIdx(0));
    ss << ", " << '(' << registerName(srcRegIdx(0)) << ')';
    ss << ", " << registerName(srcRegIdx(1));
    // When the old destination value is a real source (tail/mask
    // undisturbed, or a non-leading microop), print it too.
    const bool readsOldDest =
        microIdx != 0 || machInst.vtype8.vma == 0 || machInst.vtype8.vta == 0;
    if (readsOldDest) {
        ss << ", " << registerName(srcRegIdx(2));
    }
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
std::string
VsStrideMacroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Strided store macroop: "vsseN.v vs3, (rs1), rs2[, v0.t]".
    // srcRegIdx(2) is the data (vs3) register; srcRegIdx(1) the stride.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(srcRegIdx(2));
    ss << ", " << '(' << registerName(srcRegIdx(0)) << ')';
    ss << ", " << registerName(srcRegIdx(1));
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
std::string
VsStrideMicroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Strided store microop: "vsseN.v vs3, (rs1), rs2[, v0.t]".
    // srcRegIdx(2) is the data (vs3) register; srcRegIdx(1) the stride.
    //
    // Fix: the previous version conditionally appended srcRegIdx(2) a
    // second time (copy-paste from the strided-load variant, where that
    // operand is the old destination read for tail/mask-undisturbed
    // policies). A store has no old destination, and vs3 is already
    // printed as the first operand, so the duplicate print is dropped.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(srcRegIdx(2)) << ", " <<
        '(' << registerName(srcRegIdx(0)) << ')' <<
        ", " << registerName(srcRegIdx(1));
    if (!machInst.vm) ss << ", v0.t";
    return ss.str();
}
|
||||
|
||||
std::string
VlIndexMacroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Indexed load macroop: "vl[u|o]xeiN.v vd, (rs1), vs2[, v0.t]".
    // srcRegIdx(1) is the index vector (vs2).
    //
    // Fix: use "), " (with a space) before the index operand, matching
    // the separator used by every other disassembly method in this file;
    // the previous "),"<<... produced "...),v2" with no space.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(destRegIdx(0)) << ", "
        << '(' << registerName(srcRegIdx(0)) << "), "
        << registerName(srcRegIdx(1));
    if (!machInst.vm) ss << ", v0.t";
    return ss.str();
}
|
||||
|
||||
std::string
VlIndexMicroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Indexed load microop: one element per microop, so the element
    // indices inside vd and vs2 are shown in brackets. The uint16_t
    // casts keep the uint8_t indices from printing as characters.
    std::stringstream ss;
    ss << mnemonic << ' ';
    ss << registerName(destRegIdx(0)) << "[" << uint16_t(vdElemIdx) << "], ";
    ss << '(' << registerName(srcRegIdx(0)) << "), ";
    ss << registerName(srcRegIdx(1)) << "[" << uint16_t(vs2ElemIdx) << "]";
    // The old destination is a real source for tail/mask-undisturbed
    // policies or any non-leading microop; print it in that case.
    const bool readsOldDest =
        microIdx != 0 || machInst.vtype8.vma == 0 || machInst.vtype8.vta == 0;
    if (readsOldDest) {
        ss << ", " << registerName(srcRegIdx(2));
    }
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
std::string
VsIndexMacroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Indexed store macroop: "vs[u|o]xeiN.v vs3, (rs1), vs2[, v0.t]".
    // srcRegIdx(2) is the data (vs3) register; srcRegIdx(1) the index.
    //
    // Fix: use "), " (with a space) before the index operand for
    // consistency with the micro-op disassembly and the rest of the
    // file; the previous "),"<<... produced "...),v2" with no space.
    std::stringstream ss;
    ss << mnemonic << ' ' << registerName(srcRegIdx(2)) << ", "
        << '(' << registerName(srcRegIdx(0)) << "), "
        << registerName(srcRegIdx(1));
    if (!machInst.vm) ss << ", v0.t";
    return ss.str();
}
|
||||
|
||||
std::string
VsIndexMicroInst::generateDisassembly(Addr pc,
        const loader::SymbolTable *symtab) const
{
    // Indexed store microop: one element per microop; element positions
    // within vs3 and vs2 are printed in brackets. uint16_t casts keep
    // the uint8_t indices from printing as raw characters.
    std::stringstream ss;
    ss << mnemonic << ' ';
    ss << registerName(srcRegIdx(2)) << "[" << uint16_t(vs3ElemIdx) << "], ";
    ss << '(' << registerName(srcRegIdx(0)) << "), ";
    ss << registerName(srcRegIdx(1)) << "[" << uint16_t(vs2ElemIdx) << "]";
    if (!machInst.vm) {
        ss << ", v0.t";
    }
    return ss.str();
}
|
||||
|
||||
} // namespace RiscvISA
|
||||
} // namespace gem5
|
||||
|
||||
@@ -80,6 +80,347 @@ class VConfOp : public RiscvStaticInst
|
||||
std::string generateZimmDisassembly() const;
|
||||
};
|
||||
|
||||
inline uint8_t checked_vtype(bool vill, uint8_t vtype) {
|
||||
panic_if(vill, "vill has been set");
|
||||
const uint8_t vsew = bits(vtype, 5, 3);
|
||||
panic_if(vsew >= 0b100, "vsew: %#x not supported", vsew);
|
||||
const uint8_t vlmul = bits(vtype, 2, 0);
|
||||
panic_if(vlmul == 0b100, "vlmul: %#x not supported", vlmul);
|
||||
return vtype;
|
||||
}
|
||||
|
||||
// Common base for all vector macroops. Snapshots vl and a
// validity-checked vtype from the machine instruction at construction
// time and marks the instruction as vector.
class VectorMacroInst : public RiscvMacroInst
{
  protected:
    uint32_t vl;    // vector length captured at decode
    uint8_t vtype;  // vtype8 captured at decode, validated by checked_vtype
    VectorMacroInst(const char* mnem, ExtMachInst _machInst,
                   OpClass __opClass)
        : RiscvMacroInst(mnem, _machInst, __opClass),
        vl(_machInst.vl),
        vtype(checked_vtype(_machInst.vill, _machInst.vtype8))
    {
        this->flags[IsVector] = true;
    }
};
|
||||
|
||||
// Common base for all vector microops. Each microop handles a slice of
// the macroop's work: microVl elements at position microIdx.
class VectorMicroInst : public RiscvMicroInst
{
  protected:
    uint8_t microVl;   // number of elements this microop covers
    uint8_t microIdx;  // index of this microop within its macroop
    uint8_t vtype;     // vtype8 snapshot (not validity-checked here)
    VectorMicroInst(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
            uint8_t _microVl, uint8_t _microIdx)
        : RiscvMicroInst(mnem, _machInst, __opClass),
        microVl(_microVl),
        microIdx(_microIdx),
        vtype(_machInst.vtype8)
    {
        this->flags[IsVector] = true;
    }
};
|
||||
|
||||
class VectorNopMicroInst : public RiscvMicroInst
|
||||
{
|
||||
public:
|
||||
VectorNopMicroInst(ExtMachInst _machInst)
|
||||
: RiscvMicroInst("vnop", _machInst, No_OpClass)
|
||||
{}
|
||||
|
||||
Fault execute(ExecContext* xc, trace::InstRecord* traceData)
|
||||
const override
|
||||
{
|
||||
return NoFault;
|
||||
}
|
||||
|
||||
std::string generateDisassembly(Addr pc, const loader::SymbolTable *symtab)
|
||||
const override
|
||||
{
|
||||
std::stringstream ss;
|
||||
ss << mnemonic;
|
||||
return ss.str();
|
||||
}
|
||||
};
|
||||
|
||||
// Base for vector arithmetic microops; adds nothing beyond the common
// microop state, but provides the shared disassembly implementation.
class VectorArithMicroInst : public VectorMicroInst
{
  protected:
    VectorArithMicroInst(const char *mnem, ExtMachInst _machInst,
                         OpClass __opClass, uint8_t _microVl,
                         uint8_t _microIdx)
        : VectorMicroInst(mnem, _machInst, __opClass, _microVl, _microIdx)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
class VectorArithMacroInst : public VectorMacroInst
|
||||
{
|
||||
protected:
|
||||
VectorArithMacroInst(const char* mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass)
|
||||
: VectorMacroInst(mnem, _machInst, __opClass)
|
||||
{
|
||||
this->flags[IsVector] = true;
|
||||
}
|
||||
|
||||
std::string generateDisassembly(
|
||||
Addr pc, const loader::SymbolTable *symtab) const override;
|
||||
};
|
||||
|
||||
// Base for vector memory microops: adds a byte offset used in effective
// address calculation and the request flags for the memory access.
class VectorMemMicroInst : public VectorMicroInst
{
  protected:
    uint32_t offset; // Used to calculate EA.
    Request::Flags memAccessFlags;

    VectorMemMicroInst(const char* mnem, ExtMachInst _machInst,
                       OpClass __opClass, uint8_t _microVl, uint8_t _microIdx,
                       uint32_t _offset)
        : VectorMicroInst(mnem, _machInst, __opClass, _microVl, _microIdx)
        , offset(_offset)
        , memAccessFlags(0)
    {}
};
|
||||
|
||||
// Base for vector memory macroops; no state beyond VectorMacroInst,
// exists to group the load/store macroop hierarchy.
class VectorMemMacroInst : public VectorMacroInst
{
  protected:
    VectorMemMacroInst(const char* mnem, ExtMachInst _machInst,
                       OpClass __opClass)
        : VectorMacroInst(mnem, _machInst, __opClass)
    {}
};
|
||||
|
||||
// Macroop for unit-stride vector loads (vleN.v).
class VleMacroInst : public VectorMemMacroInst
{
  protected:
    VleMacroInst(const char* mnem, ExtMachInst _machInst,
                 OpClass __opClass)
        : VectorMemMacroInst(mnem, _machInst, __opClass)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Macroop for unit-stride vector stores (vseN.v).
class VseMacroInst : public VectorMemMacroInst
{
  protected:
    VseMacroInst(const char* mnem, ExtMachInst _machInst,
                 OpClass __opClass)
        : VectorMemMacroInst(mnem, _machInst, __opClass)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Microop for unit-stride vector loads; one microop per vector register.
class VleMicroInst : public VectorMicroInst
{
  protected:
    Request::Flags memAccessFlags;

    VleMicroInst(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                 uint8_t _microVl, uint8_t _microIdx)
        : VectorMicroInst(mnem, _machInst, __opClass, _microVl, _microIdx)
    {
        this->flags[IsLoad] = true;
    }

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Microop for unit-stride vector stores; one microop per vector register.
class VseMicroInst : public VectorMicroInst
{
  protected:
    Request::Flags memAccessFlags;

    VseMicroInst(const char *mnem, ExtMachInst _machInst, OpClass __opClass,
                 uint8_t _microVl, uint8_t _microIdx)
        : VectorMicroInst(mnem, _machInst, __opClass, _microVl, _microIdx)
    {
        this->flags[IsStore] = true;
    }

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Macroop for whole-register loads (vlNreE.v).
class VlWholeMacroInst : public VectorMemMacroInst
{
  protected:
    VlWholeMacroInst(const char *mnem, ExtMachInst _machInst,
                     OpClass __opClass)
        : VectorMemMacroInst(mnem, _machInst, __opClass)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Microop for whole-register loads; one microop per vector register.
class VlWholeMicroInst : public VectorMicroInst
{
  protected:
    Request::Flags memAccessFlags;

    VlWholeMicroInst(const char *mnem, ExtMachInst _machInst,
                     OpClass __opClass, uint8_t _microVl, uint8_t _microIdx)
        : VectorMicroInst(mnem, _machInst, __opClass, _microVl, _microIdx)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Macroop for whole-register stores (vsNr.v).
class VsWholeMacroInst : public VectorMemMacroInst
{
  protected:
    VsWholeMacroInst(const char *mnem, ExtMachInst _machInst,
                     OpClass __opClass)
        : VectorMemMacroInst(mnem, _machInst, __opClass)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
class VsWholeMicroInst : public VectorMicroInst
|
||||
{
|
||||
protected:
|
||||
Request::Flags memAccessFlags;
|
||||
|
||||
VsWholeMicroInst(const char *mnem, ExtMachInst _machInst,
|
||||
OpClass __opClass, uint8_t _microVl, uint8_t _microIdx)
|
||||
: VectorMicroInst(mnem, _machInst, __opClass, _microIdx, _microIdx)
|
||||
{}
|
||||
|
||||
std::string generateDisassembly(
|
||||
Addr pc, const loader::SymbolTable *symtab) const override;
|
||||
};
|
||||
|
||||
// Macroop for strided loads (vlseN.v).
class VlStrideMacroInst : public VectorMemMacroInst
{
  protected:
    VlStrideMacroInst(const char* mnem, ExtMachInst _machInst,
                      OpClass __opClass)
        : VectorMemMacroInst(mnem, _machInst, __opClass)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Microop for strided loads; one element per microop.
class VlStrideMicroInst : public VectorMemMicroInst
{
  protected:
    uint8_t regIdx;  // destination vector register number within the group
    // NOTE(review): the (_regIdx, _microIdx, _microVl) parameter order
    // differs from the other microop ctors, but the base-class call
    // forwards (_microVl, _microIdx) correctly.
    VlStrideMicroInst(const char *mnem, ExtMachInst _machInst,
                      OpClass __opClass, uint8_t _regIdx,
                      uint8_t _microIdx, uint8_t _microVl)
        : VectorMemMicroInst(mnem, _machInst, __opClass, _microVl,
                             _microIdx, 0)
        , regIdx(_regIdx)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Macroop for strided stores (vsseN.v).
class VsStrideMacroInst : public VectorMemMacroInst
{
  protected:
    VsStrideMacroInst(const char* mnem, ExtMachInst _machInst,
                      OpClass __opClass)
        : VectorMemMacroInst(mnem, _machInst, __opClass)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Microop for strided stores; one element per microop.
class VsStrideMicroInst : public VectorMemMicroInst
{
  protected:
    uint8_t regIdx;  // source (vs3) vector register number within the group
    // NOTE(review): parameter order (_regIdx, _microIdx, _microVl)
    // mirrors VlStrideMicroInst; the base-class call forwards
    // (_microVl, _microIdx) correctly.
    VsStrideMicroInst(const char *mnem, ExtMachInst _machInst,
                      OpClass __opClass, uint8_t _regIdx,
                      uint8_t _microIdx, uint8_t _microVl)
        : VectorMemMicroInst(mnem, _machInst, __opClass, _microVl,
                             _microIdx, 0)
        , regIdx(_regIdx)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Macroop for indexed (gather) loads: vluxeiN.v / vloxeiN.v.
class VlIndexMacroInst : public VectorMemMacroInst
{
  protected:
    VlIndexMacroInst(const char* mnem, ExtMachInst _machInst,
                     OpClass __opClass)
        : VectorMemMacroInst(mnem, _machInst, __opClass)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Microop for indexed loads: exactly one element per microop, so the
// base class is constructed with microVl=1, microIdx=0, offset=0 and
// the real element positions are tracked by the fields below.
class VlIndexMicroInst : public VectorMemMicroInst
{
  protected:
    uint8_t vdRegIdx;   // destination register within the vd group
    uint8_t vdElemIdx;  // element index within that register
    uint8_t vs2RegIdx;  // index-vector register within the vs2 group
    uint8_t vs2ElemIdx; // element index within that register
    VlIndexMicroInst(const char *mnem, ExtMachInst _machInst,
                     OpClass __opClass, uint8_t _vdRegIdx, uint8_t _vdElemIdx,
                     uint8_t _vs2RegIdx, uint8_t _vs2ElemIdx)
        : VectorMemMicroInst(mnem, _machInst, __opClass, 1,
                             0, 0)
        , vdRegIdx(_vdRegIdx), vdElemIdx(_vdElemIdx)
        , vs2RegIdx(_vs2RegIdx), vs2ElemIdx(_vs2ElemIdx)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Macroop for indexed (scatter) stores: vsuxeiN.v / vsoxeiN.v.
class VsIndexMacroInst : public VectorMemMacroInst
{
  protected:
    VsIndexMacroInst(const char* mnem, ExtMachInst _machInst,
                     OpClass __opClass)
        : VectorMemMacroInst(mnem, _machInst, __opClass)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
// Microop for indexed stores: one element per microop (base class built
// with microVl=1, microIdx=0, offset=0); element positions are tracked
// by the fields below.
class VsIndexMicroInst : public VectorMemMicroInst
{
  protected:
    uint8_t vs3RegIdx;  // data register within the vs3 group
    uint8_t vs3ElemIdx; // element index within that register
    uint8_t vs2RegIdx;  // index-vector register within the vs2 group
    uint8_t vs2ElemIdx; // element index within that register
    VsIndexMicroInst(const char *mnem, ExtMachInst _machInst,
                     OpClass __opClass, uint8_t _vs3RegIdx, uint8_t _vs3ElemIdx,
                     uint8_t _vs2RegIdx, uint8_t _vs2ElemIdx)
        : VectorMemMicroInst(mnem, _machInst, __opClass, 1, 0, 0)
        , vs3RegIdx(_vs3RegIdx), vs3ElemIdx(_vs3ElemIdx)
        , vs2RegIdx(_vs2RegIdx), vs2ElemIdx(_vs2ElemIdx)
    {}

    std::string generateDisassembly(
        Addr pc, const loader::SymbolTable *symtab) const override;
};
|
||||
|
||||
|
||||
} // namespace RiscvISA
|
||||
} // namespace gem5
|
||||
|
||||
@@ -500,6 +500,174 @@ decode QUADRANT default Unknown::unknown() {
|
||||
Fd_bits = fd.v;
|
||||
}}, inst_flags=FloatMemReadOp);
|
||||
}
|
||||
|
||||
0x0: decode MOP {
|
||||
0x0: decode LUMOP {
|
||||
0x00: VleOp::vle8_v({{
|
||||
if ((machInst.vm || elem_mask(v0, ei)) &&
|
||||
i < this->microVl) {
|
||||
Vd_ub[i] = Mem_vc.as<uint8_t>()[i];
|
||||
} else {
|
||||
Vd_ub[i] = Vs2_ub[i];
|
||||
}
|
||||
}}, inst_flags=VectorUnitStrideLoadOp);
|
||||
0x08: decode NF {
|
||||
format VlWholeOp {
|
||||
0x0: vl1re8_v({{
|
||||
Vd_ub[i] = Mem_vc.as<uint8_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x1: vl2re8_v({{
|
||||
Vd_ub[i] = Mem_vc.as<uint8_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x3: vl4re8_v({{
|
||||
Vd_ub[i] = Mem_vc.as<uint8_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x7: vl8re8_v({{
|
||||
Vd_ub[i] = Mem_vc.as<uint8_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
}
|
||||
}
|
||||
0x0b: VlmOp::vlm_v({{
|
||||
Vd_ub[i] = Mem_vc.as<uint8_t>()[i];
|
||||
}}, inst_flags=VectorUnitStrideMaskLoadOp);
|
||||
}
|
||||
0x1: VlIndexOp::vluxei8_v({{
|
||||
Vd_vu[vdElemIdx] = Mem_vc.as<vu>()[0];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_ub[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedLoadOp);
|
||||
0x2: VlStrideOp::vlse8_v({{
|
||||
Vd_ub[microIdx] = Mem_vc.as<uint8_t>()[0];
|
||||
}}, inst_flags=VectorStridedLoadOp);
|
||||
0x3: VlIndexOp::vloxei8_v({{
|
||||
Vd_vu[vdElemIdx] = Mem_vc.as<vu>()[0];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_ub[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedLoadOp);
|
||||
}
|
||||
0x5: decode MOP {
|
||||
0x0: decode LUMOP {
|
||||
0x00: VleOp::vle16_v({{
|
||||
if ((machInst.vm || elem_mask(v0, ei)) &&
|
||||
i < this->microVl) {
|
||||
Vd_uh[i] = Mem_vc.as<uint16_t>()[i];
|
||||
} else {
|
||||
Vd_uh[i] = Vs2_uh[i];
|
||||
}
|
||||
}}, inst_flags=VectorUnitStrideLoadOp);
|
||||
0x08: decode NF {
|
||||
format VlWholeOp {
|
||||
0x0: vl1re16_v({{
|
||||
Vd_uh[i] = Mem_vc.as<uint16_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x1: vl2re16_v({{
|
||||
Vd_uh[i] = Mem_vc.as<uint16_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x3: vl4re16_v({{
|
||||
Vd_uh[i] = Mem_vc.as<uint16_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x7: vl8re16_v({{
|
||||
Vd_uh[i] = Mem_vc.as<uint16_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
}
|
||||
}
|
||||
}
|
||||
0x1: VlIndexOp::vluxei16_v({{
|
||||
Vd_vu[vdElemIdx] = Mem_vc.as<vu>()[0];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_uh[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedLoadOp);
|
||||
0x2: VlStrideOp::vlse16_v({{
|
||||
Vd_uh[microIdx] = Mem_vc.as<uint16_t>()[0];
|
||||
}}, inst_flags=VectorStridedLoadOp);
|
||||
0x3: VlIndexOp::vloxei16_v({{
|
||||
Vd_vu[vdElemIdx] = Mem_vc.as<vu>()[0];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_uh[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedLoadOp);
|
||||
}
|
||||
0x6: decode MOP {
|
||||
0x0: decode LUMOP {
|
||||
0x00: VleOp::vle32_v({{
|
||||
if ((machInst.vm || elem_mask(v0, ei)) &&
|
||||
i < this->microVl) {
|
||||
Vd_uw[i] = Mem_vc.as<uint32_t>()[i];
|
||||
} else {
|
||||
Vd_uw[i] = Vs2_uw[i];
|
||||
}
|
||||
}}, inst_flags=VectorUnitStrideLoadOp);
|
||||
0x08: decode NF {
|
||||
format VlWholeOp {
|
||||
0x0: vl1re32_v({{
|
||||
Vd_uw[i] = Mem_vc.as<uint32_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x1: vl2re32_v({{
|
||||
Vd_uw[i] = Mem_vc.as<uint32_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x3: vl4re32_v({{
|
||||
Vd_uw[i] = Mem_vc.as<uint32_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x7: vl8re32_v({{
|
||||
Vd_uw[i] = Mem_vc.as<uint32_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
}
|
||||
}
|
||||
}
|
||||
0x1: VlIndexOp::vluxei32_v({{
|
||||
Vd_vu[vdElemIdx] = Mem_vc.as<vu>()[0];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_uw[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedLoadOp);
|
||||
0x2: VlStrideOp::vlse32_v({{
|
||||
Vd_uw[microIdx] = Mem_vc.as<uint32_t>()[0];
|
||||
}}, inst_flags=VectorStridedLoadOp);
|
||||
0x3: VlIndexOp::vloxei32_v({{
|
||||
Vd_vu[vdElemIdx] = Mem_vc.as<vu>()[0];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_uw[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedLoadOp);
|
||||
}
|
||||
0x7: decode MOP {
|
||||
0x0: decode LUMOP {
|
||||
0x00: VleOp::vle64_v({{
|
||||
if ((machInst.vm || elem_mask(v0, ei)) &&
|
||||
i < this->microVl) {
|
||||
Vd_ud[i] = Mem_vc.as<uint64_t>()[i];
|
||||
} else {
|
||||
Vd_ud[i] = Vs2_ud[i];
|
||||
}
|
||||
}}, inst_flags=VectorUnitStrideLoadOp);
|
||||
0x08: decode NF {
|
||||
format VlWholeOp {
|
||||
0x0: vl1re64_v({{
|
||||
Vd_ud[i] = Mem_vc.as<uint64_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x1: vl2re64_v({{
|
||||
Vd_ud[i] = Mem_vc.as<uint64_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x3: vl4re64_v({{
|
||||
Vd_ud[i] = Mem_vc.as<uint64_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
0x7: vl8re64_v({{
|
||||
Vd_ud[i] = Mem_vc.as<uint64_t>()[i];
|
||||
}}, inst_flags=VectorWholeRegisterLoadOp);
|
||||
}
|
||||
}
|
||||
}
|
||||
0x1: VlIndexOp::vluxei64_v({{
|
||||
Vd_vu[vdElemIdx] = Mem_vc.as<vu>()[0];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_ud[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedLoadOp);
|
||||
0x2: VlStrideOp::vlse64_v({{
|
||||
Vd_ud[microIdx] = Mem_vc.as<uint64_t>()[0];
|
||||
}}, inst_flags=VectorStridedLoadOp);
|
||||
0x3: VlIndexOp::vloxei64_v({{
|
||||
Vd_vu[vdElemIdx] = Mem_vc.as<vu>()[0];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_ud[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedLoadOp);
|
||||
}
|
||||
}
|
||||
|
||||
0x03: decode FUNCT3 {
|
||||
@@ -806,6 +974,106 @@ decode QUADRANT default Unknown::unknown() {
|
||||
Mem_ud = Fs2_bits;
|
||||
}}, inst_flags=FloatMemWriteOp);
|
||||
}
|
||||
|
||||
0x0: decode MOP {
|
||||
0x0: decode SUMOP {
|
||||
0x00: VseOp::vse8_v({{
|
||||
Mem_vc.as<uint8_t>()[i] = Vs3_ub[i];
|
||||
}}, inst_flags=VectorUnitStrideStoreOp);
|
||||
format VsWholeOp {
|
||||
0x8: decode NF {
|
||||
0x0: vs1r_v({{
|
||||
Mem_vc.as<uint8_t>()[i] = Vs3_ub[i];
|
||||
}}, inst_flags=VectorWholeRegisterStoreOp);
|
||||
0x1: vs2r_v({{
|
||||
Mem_vc.as<uint8_t>()[i] = Vs3_ub[i];
|
||||
}}, inst_flags=VectorWholeRegisterStoreOp);
|
||||
0x3: vs4r_v({{
|
||||
Mem_vc.as<uint8_t>()[i] = Vs3_ub[i];
|
||||
}}, inst_flags=VectorWholeRegisterStoreOp);
|
||||
0x7: vs8r_v({{
|
||||
Mem_vc.as<uint8_t>()[i] = Vs3_ub[i];
|
||||
}}, inst_flags=VectorWholeRegisterStoreOp);
|
||||
}
|
||||
}
|
||||
0x0b: VsmOp::vsm_v({{
|
||||
Mem_vc.as<uint8_t>()[i] = Vs3_ub[i];
|
||||
}}, inst_flags=VectorUnitStrideMaskStoreOp);
|
||||
}
|
||||
0x1: VsIndexOp::vsuxei8_v({{
|
||||
Mem_vc.as<vu>()[0] = Vs3_vu[vs3ElemIdx];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_ub[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedStoreOp);
|
||||
0x2: VsStrideOp::vsse8_v({{
|
||||
Mem_vc.as<uint8_t>()[0] = Vs3_ub[microIdx];
|
||||
}}, inst_flags=VectorStridedStoreOp);
|
||||
0x3: VsIndexOp::vsoxei8_v({{
|
||||
Mem_vc.as<vu>()[0] = Vs3_vu[vs3ElemIdx];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_ub[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedStoreOp);
|
||||
}
|
||||
0x5: decode MOP {
|
||||
0x0: decode SUMOP {
|
||||
0x00: VseOp::vse16_v({{
|
||||
Mem_vc.as<uint16_t>()[i] = Vs3_uh[i];
|
||||
}}, inst_flags=VectorUnitStrideStoreOp);
|
||||
}
|
||||
0x1: VsIndexOp::vsuxei16_v({{
|
||||
Mem_vc.as<vu>()[0] = Vs3_vu[vs3ElemIdx];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_uh[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedStoreOp);
|
||||
0x2: VsStrideOp::vsse16_v({{
|
||||
Mem_vc.as<uint16_t>()[0] = Vs3_uh[microIdx];
|
||||
}}, inst_flags=VectorStridedStoreOp);
|
||||
0x3: VsIndexOp::vsoxei16_v({{
|
||||
Mem_vc.as<vu>()[0] = Vs3_vu[vs3ElemIdx];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_uh[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedStoreOp);
|
||||
}
|
||||
0x6: decode MOP {
|
||||
0x0: decode SUMOP {
|
||||
0x00: VseOp::vse32_v({{
|
||||
Mem_vc.as<uint32_t>()[i] = Vs3_uw[i];
|
||||
}}, inst_flags=VectorUnitStrideStoreOp);
|
||||
}
|
||||
0x1: VsIndexOp::vsuxei32_v({{
|
||||
Mem_vc.as<vu>()[0] = Vs3_vu[vs3ElemIdx];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_uw[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedStoreOp);
|
||||
0x2: VsStrideOp::vsse32_v({{
|
||||
Mem_vc.as<uint32_t>()[0] = Vs3_uw[microIdx];
|
||||
}}, inst_flags=VectorStridedStoreOp);
|
||||
0x3: VsIndexOp::vsoxei32_v({{
|
||||
Mem_vc.as<vu>()[0] = Vs3_vu[vs3ElemIdx];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_uw[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedStoreOp);
|
||||
}
|
||||
0x7: decode MOP {
|
||||
0x0: decode SUMOP {
|
||||
0x00: VseOp::vse64_v({{
|
||||
Mem_vc.as<uint64_t>()[i] = Vs3_ud[i];
|
||||
}}, inst_flags=VectorUnitStrideStoreOp);
|
||||
}
|
||||
0x1: VsIndexOp::vsuxei64_v({{
|
||||
Mem_vc.as<vu>()[0] = Vs3_vu[vs3ElemIdx];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_ud[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedStoreOp);
|
||||
0x2: VsStrideOp::vsse64_v({{
|
||||
Mem_vc.as<uint64_t>()[0] = Vs3_ud[microIdx];
|
||||
}}, inst_flags=VectorStridedStoreOp);
|
||||
0x3: VsIndexOp::vsoxei64_v({{
|
||||
Mem_vc.as<vu>()[0] = Vs3_vu[vs3ElemIdx];
|
||||
}}, {{
|
||||
EA = Rs1 + Vs2_ud[vs2ElemIdx];
|
||||
}}, inst_flags=VectorIndexedStoreOp);
|
||||
}
|
||||
}
|
||||
|
||||
0x0b: decode FUNCT3 {
|
||||
|
||||
@@ -38,6 +38,7 @@
|
||||
##include "amo.isa"
|
||||
##include "bs.isa"
|
||||
##include "vector_conf.isa"
|
||||
##include "vector_mem.isa"
|
||||
|
||||
// Include formats for nonstandard extensions
|
||||
##include "compressed.isa"
|
||||
|
||||
205
src/arch/riscv/isa/formats/vector_mem.isa
Normal file
205
src/arch/riscv/isa/formats/vector_mem.isa
Normal file
@@ -0,0 +1,205 @@
|
||||
// -*- mode:c++ -*-
|
||||
|
||||
// Copyright (c) 2022 PLCT Lab
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met: redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer;
|
||||
// redistributions in binary form must reproduce the above copyright
|
||||
// notice, this list of conditions and the following disclaimer in the
|
||||
// documentation and/or other materials provided with the distribution;
|
||||
// neither the name of the copyright holders nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
|
||||
let {{

# Shared generator for all vector-memory instruction formats.
# Emits the macroop declaration/constructor/decode entry and, when
# is_macroop is True, the matching microop declaration plus its
# execute/initiateAcc/completeAcc bodies, looked up from templates
# named "<exec_template_base>Micro{Declare,Execute,InitiateAcc,
# CompleteAcc}".
def VMemBase(name, Name, ea_code, memacc_code, mem_flags,
             inst_flags, base_class, postacc_code='',
             declare_template_base=VMemMacroDeclare,
             decode_template=BasicDecode, exec_template_base='',
             # If it's a macroop, the corresponding microops will be
             # generated.
             is_macroop=True):
    # Make sure flags are in lists (convert to lists if not).
    mem_flags = makeList(mem_flags)
    inst_flags = makeList(inst_flags)
    iop = InstObjParams(name, Name, base_class,
        {'ea_code': ea_code,
         'memacc_code': memacc_code,
         'postacc_code': postacc_code },
        inst_flags)

    # Constructor template is selected by name, e.g. "VleConstructor".
    constructTemplate = eval(exec_template_base + 'Constructor')

    header_output = declare_template_base.subst(iop)
    decoder_output = ''
    # Template-style declarations must keep the constructor in the
    # header; otherwise it goes into the decoder .cc file.
    if declare_template_base is not VMemTemplateMacroDeclare:
        decoder_output += constructTemplate.subst(iop)
    else:
        header_output += constructTemplate.subst(iop)
    decode_block = decode_template.subst(iop)
    exec_output = ''
    if not is_macroop:
        # No microops to generate (e.g. vlm.v/vsm.v); done.
        return (header_output, decoder_output, decode_block, exec_output)

    # Build the companion microop from the same code snippets.
    microiop = InstObjParams(name + '_micro',
        Name + 'Micro',
        exec_template_base + 'MicroInst',
        {'ea_code': ea_code,
         'memacc_code': memacc_code,
         'postacc_code': postacc_code},
        inst_flags)

    # Fold any Request:: memory flags into the microop constructor body.
    if mem_flags:
        mem_flags = [ 'Request::%s' % flag for flag in mem_flags ]
        s = '\n\tmemAccessFlags = ' + '|'.join(mem_flags) + ';'
        microiop.constructor += s

    # Per-microop templates are also looked up by naming convention.
    microDeclTemplate = eval(exec_template_base + 'Micro' + 'Declare')
    microExecTemplate = eval(exec_template_base + 'Micro' + 'Execute')
    microInitTemplate = eval(exec_template_base + 'Micro' + 'InitiateAcc')
    microCompTemplate = eval(exec_template_base + 'Micro' + 'CompleteAcc')
    header_output = microDeclTemplate.subst(microiop) + header_output
    micro_exec_output = (microExecTemplate.subst(microiop) +
                         microInitTemplate.subst(microiop) +
                         microCompTemplate.subst(microiop))
    # Same header-vs-decoder split as above for the exec bodies.
    if declare_template_base is not VMemTemplateMacroDeclare:
        exec_output += micro_exec_output
    else:
        header_output += micro_exec_output

    return (header_output, decoder_output, decode_block, exec_output)

}};
|
||||
|
||||
def format VleOp(
|
||||
memacc_code,
|
||||
ea_code={{ EA = Rs1 + VLENB * microIdx; }},
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VleMacroInst', exec_template_base='Vle')
|
||||
}};
|
||||
|
||||
def format VseOp(
|
||||
memacc_code,
|
||||
ea_code={{ EA = Rs1 + VLENB * microIdx; }},
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VseMacroInst', exec_template_base='Vse')
|
||||
}};
|
||||
|
||||
def format VlmOp(
|
||||
memacc_code,
|
||||
ea_code={{ EA = Rs1; }},
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VleMacroInst', exec_template_base='Vlm', is_macroop=False)
|
||||
}};
|
||||
|
||||
def format VsmOp(
|
||||
memacc_code,
|
||||
ea_code={{ EA = Rs1; }},
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VseMacroInst', exec_template_base='Vsm', is_macroop=False)
|
||||
}};
|
||||
|
||||
def format VlWholeOp(
|
||||
memacc_code,
|
||||
ea_code={{ EA = Rs1 + VLENB * microIdx; }},
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VlWholeMacroInst', exec_template_base='VlWhole')
|
||||
}};
|
||||
|
||||
def format VsWholeOp(
|
||||
memacc_code,
|
||||
ea_code={{ EA = Rs1 + VLENB * microIdx; }},
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VsWholeMacroInst', exec_template_base='VsWhole')
|
||||
}};
|
||||
|
||||
def format VlStrideOp(
|
||||
memacc_code,
|
||||
ea_code={{ EA = Rs1 + Rs2 * (regIdx * VLENB / elem_size + microIdx); }},
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VlStrideMacroInst', exec_template_base='VlStride')
|
||||
}};
|
||||
|
||||
def format VsStrideOp(
|
||||
memacc_code,
|
||||
ea_code={{ EA = Rs1 + Rs2 * (regIdx * VLENB / elem_size + microIdx); }},
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VsStrideMacroInst', exec_template_base='VsStride')
|
||||
}};
|
||||
|
||||
def format VlIndexOp(
|
||||
memacc_code,
|
||||
ea_code,
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VlIndexMacroInst', exec_template_base='VlIndex',
|
||||
declare_template_base=VMemTemplateMacroDeclare,
|
||||
decode_template=VMemTemplateDecodeBlock
|
||||
)
|
||||
}};
|
||||
|
||||
def format VsIndexOp(
|
||||
memacc_code,
|
||||
ea_code,
|
||||
mem_flags=[],
|
||||
inst_flags=[]
|
||||
) {{
|
||||
(header_output, decoder_output, decode_block, exec_output) = \
|
||||
VMemBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
|
||||
'VsIndexMacroInst', exec_template_base='VsIndex',
|
||||
declare_template_base=VMemTemplateMacroDeclare,
|
||||
decode_template=VMemTemplateDecodeBlock
|
||||
)
|
||||
}};
|
||||
@@ -46,6 +46,7 @@ output header {{
|
||||
#include <softfloat.h>
|
||||
#include <specialize.h>
|
||||
|
||||
#include "arch/generic/memhelpers.hh"
|
||||
#include "arch/riscv/decoder.hh"
|
||||
#include "arch/riscv/insts/amo.hh"
|
||||
#include "arch/riscv/insts/bs.hh"
|
||||
@@ -55,6 +56,7 @@ output header {{
|
||||
#include "arch/riscv/insts/standard.hh"
|
||||
#include "arch/riscv/insts/static_inst.hh"
|
||||
#include "arch/riscv/insts/unknown.hh"
|
||||
#include "arch/riscv/insts/vector.hh"
|
||||
#include "arch/riscv/interrupts.hh"
|
||||
#include "cpu/static_inst.hh"
|
||||
#include "mem/packet.hh"
|
||||
@@ -68,9 +70,15 @@ output decoder {{
|
||||
#include <limits>
|
||||
#include <string>
|
||||
|
||||
/* riscv softfloat library */
|
||||
#include <internals.h>
|
||||
#include <softfloat.h>
|
||||
#include <specialize.h>
|
||||
|
||||
#include "arch/riscv/decoder.hh"
|
||||
#include "arch/riscv/faults.hh"
|
||||
#include "arch/riscv/mmu.hh"
|
||||
#include "arch/riscv/regs/float.hh"
|
||||
#include "base/cprintf.hh"
|
||||
#include "base/loader/symtab.hh"
|
||||
#include "cpu/thread_context.hh"
|
||||
|
||||
@@ -50,6 +50,9 @@ namespace RiscvISA;
|
||||
//Include the operand_types and operand definitions
|
||||
##include "operands.isa"
|
||||
|
||||
//Include the definitions for the instruction templates
|
||||
##include "templates/templates.isa"
|
||||
|
||||
//Include the definitions for the instruction formats
|
||||
##include "formats/formats.isa"
|
||||
|
||||
|
||||
2
src/arch/riscv/isa/templates/templates.isa
Normal file
2
src/arch/riscv/isa/templates/templates.isa
Normal file
@@ -0,0 +1,2 @@
|
||||
// Include
|
||||
##include "vector_mem.isa"
|
||||
1349
src/arch/riscv/isa/templates/vector_mem.isa
Normal file
1349
src/arch/riscv/isa/templates/vector_mem.isa
Normal file
File diff suppressed because it is too large
Load Diff
@@ -241,6 +241,61 @@ remu(T rs1, T rs2)
|
||||
return (rs2 == 0) ? rs1 : rs1 % rs2;
|
||||
}
|
||||
|
||||
/*
|
||||
* Encode LMUL to lmul as follows:
|
||||
* LMUL vlmul lmul
|
||||
* 1 000 0
|
||||
* 2 001 1
|
||||
* 4 010 2
|
||||
* 8 011 3
|
||||
* - 100 -
|
||||
* 1/8 101 -3
|
||||
* 1/4 110 -2
|
||||
* 1/2 111 -1
|
||||
*
|
||||
* then, we can calculate VLMAX = vlen >> (vsew + 3 - lmul)
|
||||
* e.g. vlen = 256 bits, SEW = 16, LMUL = 1/8
|
||||
* => VLMAX = vlen >> (1 + 3 - (-3))
|
||||
* = 256 >> 7
|
||||
* = 2
|
||||
* Ref: https://github.com/qemu/qemu/blob/5e9d14f2/target/riscv/cpu.h
|
||||
*/
|
||||
inline uint64_t
|
||||
vtype_VLMAX(const uint64_t vtype, const bool per_reg = false)
|
||||
{
|
||||
int64_t lmul = (int64_t)sext<3>(bits(vtype, 2, 0));
|
||||
lmul = per_reg ? std::min<int64_t>(0, lmul) : lmul;
|
||||
int64_t vsew = bits(vtype, 5, 3);
|
||||
return gem5::RiscvISA::VLEN >> (vsew + 3 - lmul);
|
||||
}
|
||||
|
||||
inline uint64_t
|
||||
width_EEW(uint64_t width)
|
||||
{
|
||||
switch (width) {
|
||||
case 0b000: return 8;
|
||||
case 0b101: return 16;
|
||||
case 0b110: return 32;
|
||||
case 0b111: return 64;
|
||||
default: GEM5_UNREACHABLE;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Spec Section 4.5
|
||||
* Ref:
|
||||
* https://github.com/qemu/qemu/blob/c7d773ae/target/riscv/vector_helper.c
|
||||
*/
|
||||
template<typename T>
|
||||
inline int
|
||||
elem_mask(const T* vs, const int index)
|
||||
{
|
||||
static_assert(std::is_integral_v<T>);
|
||||
int idx = index / (sizeof(T)*8);
|
||||
int pos = index % (sizeof(T)*8);
|
||||
return (vs[idx] >> pos) & 1;
|
||||
}
|
||||
|
||||
} // namespace RiscvISA
|
||||
} // namespace gem5
|
||||
|
||||
|
||||
Reference in New Issue
Block a user