misc: Replace M5_VAR_USED with GEM5_VAR_USED.

Change-Id: I64a874ccd1a9ac0541dfa01971d7d620a98c9d32
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/45231
Tested-by: kokoro <noreply+kokoro@google.com>
Maintainer: Gabe Black <gabe.black@gmail.com>
Reviewed-by: Daniel Carvalho <odanrc@yahoo.com.br>
Commit: fb3befcc6d (parent: e55ae090b3)
Author: Gabe Black
Date: 2021-05-08 20:23:10 -07:00

69 files changed, 133 insertions(+), 132 deletions(-)

View File

@@ -517,7 +517,7 @@ ArmFault::invoke(ThreadContext *tc, const StaticInstPtr &inst)
saved_cpsr.v = tc->readCCReg(CCREG_V);
saved_cpsr.ge = tc->readCCReg(CCREG_GE);
M5_VAR_USED Addr curPc = tc->pcState().pc();
GEM5_VAR_USED Addr curPc = tc->pcState().pc();
ITSTATE it = tc->pcState().itstate();
saved_cpsr.it2 = it.top6;
saved_cpsr.it1 = it.bottom2;
@@ -525,7 +525,7 @@ ArmFault::invoke(ThreadContext *tc, const StaticInstPtr &inst)
// if we have a valid instruction then use it to annotate this fault with
// extra information. This is used to generate the correct fault syndrome
// information
M5_VAR_USED ArmStaticInst *arm_inst = instrAnnotate(inst);
GEM5_VAR_USED ArmStaticInst *arm_inst = instrAnnotate(inst);
// Ensure Secure state if initially in Monitor mode
if (have_security && saved_cpsr.mode == MODE_MON) {
@@ -703,7 +703,7 @@ ArmFault::invoke64(ThreadContext *tc, const StaticInstPtr &inst)
// If we have a valid instruction then use it to annotate this fault with
// extra information. This is used to generate the correct fault syndrome
// information
M5_VAR_USED ArmStaticInst *arm_inst = instrAnnotate(inst);
GEM5_VAR_USED ArmStaticInst *arm_inst = instrAnnotate(inst);
// Set PC to start of exception handler
Addr new_pc = purifyTaggedAddr(vec_address, tc, toEL, true);
@@ -755,7 +755,7 @@ Reset::getVector(ThreadContext *tc)
Addr base;
// Check for invalid modes
M5_VAR_USED CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
GEM5_VAR_USED CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
assert(ArmSystem::haveSecurity(tc) || cpsr.mode != MODE_MON);
assert(ArmSystem::haveVirtualization(tc) || cpsr.mode != MODE_HYP);
@@ -1069,7 +1069,7 @@ AbortFault<T>::invoke(ThreadContext *tc, const StaticInstPtr &inst)
// See ARM ARM B3-1416
bool override_LPAE = false;
TTBCR ttbcr_s = tc->readMiscReg(MISCREG_TTBCR_S);
M5_VAR_USED TTBCR ttbcr_ns = tc->readMiscReg(MISCREG_TTBCR_NS);
GEM5_VAR_USED TTBCR ttbcr_ns = tc->readMiscReg(MISCREG_TTBCR_NS);
if (ttbcr_s.eae) {
override_LPAE = true;
} else {

View File

@@ -560,7 +560,7 @@ VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
unsigned eBytes = (1 << size);
unsigned loadSize = eBytes * elems;
M5_VAR_USED unsigned loadRegs =
GEM5_VAR_USED unsigned loadRegs =
(loadSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);
assert(loadRegs > 0 && loadRegs <= 4);
@@ -924,7 +924,7 @@ VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
unsigned eBytes = (1 << size);
unsigned storeSize = eBytes * elems;
M5_VAR_USED unsigned storeRegs =
GEM5_VAR_USED unsigned storeRegs =
(storeSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);
assert(storeRegs > 0 && storeRegs <= 4);

View File

@@ -83,16 +83,16 @@ let {{
"logic": '0'
}
immOp2 = "M5_VAR_USED uint64_t secOp = imm;"
sRegOp2 = "M5_VAR_USED uint64_t secOp = " + \
immOp2 = "GEM5_VAR_USED uint64_t secOp = imm;"
sRegOp2 = "GEM5_VAR_USED uint64_t secOp = " + \
"shiftReg64(Op264, shiftAmt, shiftType, intWidth);"
eRegOp2 = "M5_VAR_USED uint64_t secOp = " + \
eRegOp2 = "GEM5_VAR_USED uint64_t secOp = " + \
"extendReg64(Op264, extendType, shiftAmt, intWidth);"
def buildDataWork(mnem, code, flagType, suffix, buildCc, buildNonCc,
base, templateBase):
code = '''
M5_VAR_USED uint64_t resTemp = 0;
GEM5_VAR_USED uint64_t resTemp = 0;
''' + code
ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
Name = mnem.capitalize() + suffix
@@ -577,9 +577,9 @@ let {{
def condCompCode(flagType, op, imm):
ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
opDecl = "M5_VAR_USED uint64_t secOp = imm;"
opDecl = "GEM5_VAR_USED uint64_t secOp = imm;"
if not imm:
opDecl = "M5_VAR_USED uint64_t secOp = Op264;"
opDecl = "GEM5_VAR_USED uint64_t secOp = Op264;"
return opDecl + '''
if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
uint64_t resTemp = Op164 ''' + op + ''' secOp;

View File

@@ -467,7 +467,7 @@ let {{
exec_output = ""
singleSimpleCode = vfpEnabledCheckCode + '''
M5_VAR_USED FPSCR fpscr = (FPSCR) FpscrExc;
GEM5_VAR_USED FPSCR fpscr = (FPSCR) FpscrExc;
FpDest = %(op)s;
'''
singleCode = singleSimpleCode + '''
@@ -488,7 +488,7 @@ let {{
"%(func)s, fpscr.fz, fpscr.dn, fpscr.rMode)"
singleUnaryOp = "unaryOp(fpscr, FpOp1, %(func)s, fpscr.fz, fpscr.rMode)"
doubleCode = vfpEnabledCheckCode + '''
M5_VAR_USED FPSCR fpscr = (FPSCR) FpscrExc;
GEM5_VAR_USED FPSCR fpscr = (FPSCR) FpscrExc;
double dest = %(op)s;
FpDestP0_uw = dblLow(dest);
FpDestP1_uw = dblHi(dest);

View File

@@ -201,7 +201,7 @@ let {{
accEpilogCode = None
# Code that actually handles the access
if self.flavor in ("dprefetch", "iprefetch", "mprefetch"):
accCode = 'M5_VAR_USED uint64_t temp = Mem%s;'
accCode = 'GEM5_VAR_USED uint64_t temp = Mem%s;'
elif self.flavor == "fp":
accEpilogCode = '''
ArmISA::ISA::zeroSveVecRegUpperPart(AA64FpDest,

View File

@@ -128,7 +128,7 @@ let {{
bitMask = (bitMask >> imm1) | (bitMask << (intWidth - imm1));
diff += intWidth;
}
M5_VAR_USED uint64_t topBits = ~mask(diff+1);
GEM5_VAR_USED uint64_t topBits = ~mask(diff+1);
uint64_t result = imm1 == 0 ? Op164 :
(Op164 >> imm1) | (Op164 << (intWidth - imm1));
result &= bitMask;

View File

@@ -2007,7 +2007,7 @@ let {{
destPred.reset();
for (unsigned i = 0; i < eCount; i++) {
const Element& srcElem1 = AA64FpOp1_x[i];
M5_VAR_USED %(src_elem_2_ty)s srcElem2 = %(src_elem_2)s;
GEM5_VAR_USED %(src_elem_2_ty)s srcElem2 = %(src_elem_2)s;
bool destElem = false;
if (tmpPred[i]) {
%(op)s
@@ -2703,7 +2703,7 @@ let {{
CondCodesC = !destPred.lastActive(GpOp, eCount);
CondCodesV = 0;'''
extraPrologCode = '''
M5_VAR_USED auto& destPred = PDest;'''
GEM5_VAR_USED auto& destPred = PDest;'''
baseClass = ('SvePredUnaryWImplicitSrcOp' if predType == PredType.NONE
else 'SvePredUnaryWImplicitSrcPredOp')
iop = ArmInstObjParams(name, 'Sve' + Name, baseClass,
@@ -2722,7 +2722,7 @@ let {{
global header_output, exec_output, decoders
code = sveEnabledCheckCode + op
extraPrologCode = '''
M5_VAR_USED auto& destPred = Ffr;'''
GEM5_VAR_USED auto& destPred = Ffr;'''
baseClass = ('SveWImplicitSrcDstOp' if isSetFfr
else 'SvePredUnaryWImplicitDstOp')
iop = ArmInstObjParams(name, 'Sve' + Name, baseClass,

View File

@@ -1164,7 +1164,7 @@ def template LoadRegConstructor {{
{
%(set_reg_idx_arr)s;
%(constructor)s;
M5_VAR_USED bool conditional = false;
GEM5_VAR_USED bool conditional = false;
if (!(condCode == COND_AL || condCode == COND_UC)) {
conditional = true;
for (int x = 0; x < _numDestRegs; x++) {
@@ -1231,7 +1231,7 @@ def template LoadImmConstructor {{
{
%(set_reg_idx_arr)s;
%(constructor)s;
M5_VAR_USED bool conditional = false;
GEM5_VAR_USED bool conditional = false;
if (!(condCode == COND_AL || condCode == COND_UC)) {
conditional = true;
for (int x = 0; x < _numDestRegs; x++) {

View File

@@ -157,7 +157,7 @@ def template SveContigLoadExecute {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<RegElemType>(xc->tcBase());
@@ -192,7 +192,7 @@ def template SveContigLoadInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<RegElemType>(xc->tcBase());
@@ -217,7 +217,7 @@ def template SveContigLoadCompleteAcc {{
%(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt,
ExecContext *xc, Trace::InstRecord *traceData) const
{
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<RegElemType>(xc->tcBase());
@@ -247,7 +247,7 @@ def template SveContigStoreExecute {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<RegElemType>(xc->tcBase());
@@ -285,7 +285,7 @@ def template SveContigStoreInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<RegElemType>(xc->tcBase());
@@ -329,7 +329,7 @@ def template SveLoadAndReplExecute {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<RegElemType>(xc->tcBase());
@@ -361,7 +361,7 @@ def template SveLoadAndReplInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
%(op_src_decl)s;
%(op_rd)s;
@@ -386,7 +386,7 @@ def template SveLoadAndReplCompleteAcc {{
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<RegElemType>(xc->tcBase());
@@ -585,7 +585,7 @@ def template SveGatherLoadMicroopExecute {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -634,7 +634,7 @@ def template SveGatherLoadMicroopInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
%(op_src_decl)s;
%(op_rd)s;
@@ -675,7 +675,7 @@ def template SveGatherLoadMicroopCompleteAcc {{
%(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt,
ExecContext *xc, Trace::InstRecord *traceData) const
{
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -702,7 +702,7 @@ def template SveScatterStoreMicroopExecute {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -733,7 +733,7 @@ def template SveScatterStoreMicroopInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -806,7 +806,7 @@ def template SveFirstFaultWritebackMicroopExecute {{
%(class_name)s%(tpl_args)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -989,7 +989,7 @@ def template SveStructLoadExecute {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<Element>(xc->tcBase());
@@ -1023,7 +1023,7 @@ def template SveStructLoadInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<Element>(xc->tcBase());
@@ -1049,7 +1049,7 @@ def template SveStructLoadCompleteAcc {{
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<Element>(xc->tcBase());
@@ -1082,7 +1082,7 @@ def template SveStructStoreExecute {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<Element>(xc->tcBase());
@@ -1120,7 +1120,7 @@ def template SveStructStoreInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
M5_VAR_USED bool aarch64 = true;
GEM5_VAR_USED bool aarch64 = true;
unsigned eCount =
ArmStaticInst::getCurSveVecLen<Element>(xc->tcBase());

View File

@@ -814,7 +814,7 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
// Cache clean operations require read permissions to the specified VA
bool is_write = !req->isCacheClean() && mode == Write;
bool is_atomic = req->isAtomic();
M5_VAR_USED bool is_priv = isPriv && !(flags & UserMode);
GEM5_VAR_USED bool is_priv = isPriv && !(flags & UserMode);
updateMiscReg(tc, curTranType);

View File

@@ -133,7 +133,7 @@ class Template(object):
if operands.predRead:
myDict['op_decl'] += 'uint8_t _sourceIndex = 0;\n'
if operands.predWrite:
myDict['op_decl'] += 'M5_VAR_USED uint8_t _destIndex = 0;\n'
myDict['op_decl'] += 'GEM5_VAR_USED uint8_t _destIndex = 0;\n'
is_src = lambda op: op.is_src
is_dest = lambda op: op.is_dest

View File

@@ -145,8 +145,8 @@ Interrupts::getInterrupt()
{
assert(checkInterrupts());
M5_VAR_USED StatusReg status = tc->readMiscRegNoEffect(MISCREG_STATUS);
M5_VAR_USED CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE);
GEM5_VAR_USED StatusReg status = tc->readMiscRegNoEffect(MISCREG_STATUS);
GEM5_VAR_USED CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE);
DPRINTF(Interrupt, "Interrupt! IM[7:0]=%d IP[7:0]=%d \n",
(unsigned)status.im, (unsigned)cause.ip);

View File

@@ -407,7 +407,7 @@ def template MiscExecute {{
Fault %(class_name)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
M5_VAR_USED Addr EA = 0;
GEM5_VAR_USED Addr EA = 0;
Fault fault = NoFault;
%(fp_enable_check)s;

View File

@@ -111,7 +111,7 @@ def template ThreadRegisterExecute {{
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
M5_VAR_USED int64_t data;
GEM5_VAR_USED int64_t data;
%(op_decl)s;
%(op_rd)s;

View File

@@ -112,7 +112,7 @@ def template LoadCompleteAcc {{
ExecContext *xc,
Trace::InstRecord *traceData) const
{
M5_VAR_USED Addr EA;
GEM5_VAR_USED Addr EA;
Fault fault = NoFault;
%(op_decl)s;

View File

@@ -53,7 +53,7 @@
namespace RiscvISA
{
M5_VAR_USED const std::array<const char *, NUM_MISCREGS> MiscRegNames = {{
GEM5_VAR_USED const std::array<const char *, NUM_MISCREGS> MiscRegNames = {{
[MISCREG_PRV] = "PRV",
[MISCREG_ISA] = "ISA",
[MISCREG_VENDORID] = "VENDORID",

View File

@@ -813,7 +813,7 @@ TrapInstruction::invoke(ThreadContext *tc, const StaticInstPtr &inst)
Process *p = tc->getProcessPtr();
M5_VAR_USED SparcProcess *sp = dynamic_cast<SparcProcess *>(p);
GEM5_VAR_USED SparcProcess *sp = dynamic_cast<SparcProcess *>(p);
assert(sp);
auto *workload = dynamic_cast<SEWorkload *>(tc->getSystemPtr()->workload);

View File

@@ -213,7 +213,7 @@ let {{
Macroop * macroop = dynamic_cast<Macroop *>(curMacroop.get());
const ExtMachInst &machInst =
macroop ? macroop->getExtMachInst() : dummyExtMachInst;
M5_VAR_USED const EmulEnv &env =
GEM5_VAR_USED const EmulEnv &env =
macroop ? macroop->getEmulEnv() : dummyEmulEnv;
using namespace RomLabels;
return %s;

View File

@@ -50,7 +50,7 @@ def template MicroRegOpExecute {{
%(op_decl)s;
%(op_rd)s;
M5_VAR_USED RegVal result;
GEM5_VAR_USED RegVal result;
if (%(cond_check)s) {
%(code)s;

View File

@@ -46,7 +46,7 @@
*/
namespace X86ISA
{
M5_VAR_USED const Request::FlagsType SegmentFlagMask = mask(4);
GEM5_VAR_USED const Request::FlagsType SegmentFlagMask = mask(4);
const int FlagShift = 4;
enum FlagBit
{

View File

@@ -141,7 +141,7 @@ ElfObject::ElfObject(ImageFileDataPtr ifd) : ObjectFile(ifd)
"No loadable segments in '%s'. ELF file corrupted?\n",
imageData->filename());
for (M5_VAR_USED auto &seg: image.segments())
for (GEM5_VAR_USED auto &seg: image.segments())
DPRINTFR(Loader, "%s\n", seg);
// We will actually read the sections when we need to load them

View File

@@ -441,7 +441,7 @@ BaseRemoteGDB::detach()
void
BaseRemoteGDB::addThreadContext(ThreadContext *_tc)
{
M5_VAR_USED auto it_success = threads.insert({_tc->contextId(), _tc});
GEM5_VAR_USED auto it_success = threads.insert({_tc->contextId(), _tc});
assert(it_success.second);
// If no ThreadContext is current selected, select this one.
if (!tc)

View File

@@ -67,7 +67,7 @@ Group::regStats()
for (auto &g : statGroups) {
if (Debug::Stats) {
M5_VAR_USED const SimObject *so =
GEM5_VAR_USED const SimObject *so =
dynamic_cast<const SimObject *>(this);
DPRINTF(Stats, "%s: regStats in group %s\n",
so ? so->name() : "?",

View File

@@ -376,7 +376,7 @@ VncServer::checkProtocolVersion()
{
assert(curState == WaitForProtocolVersion);
M5_VAR_USED size_t len;
GEM5_VAR_USED size_t len;
char version_string[13];
// Null terminate the message so it's easier to work with

View File

@@ -390,7 +390,7 @@ void
Fetch1::minorTraceResponseLine(const std::string &name,
Fetch1::FetchRequestPtr response) const
{
M5_VAR_USED const RequestPtr &request = response->request;
GEM5_VAR_USED const RequestPtr &request = response->request;
if (response->packet && response->packet->isError()) {
MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",

View File

@@ -78,7 +78,7 @@ LSQ::LSQRequest::tryToSuppressFault()
SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
TheISA::PCState old_pc = thread.pcState();
ExecContext context(port.cpu, thread, port.execute, inst, zeroReg);
M5_VAR_USED Fault fault = inst->translationFault;
GEM5_VAR_USED Fault fault = inst->translationFault;
// Give the instruction a chance to suppress a translation fault
inst->translationFault = inst->staticInst->initiateAcc(&context, nullptr);
@@ -334,7 +334,7 @@ LSQ::SplitDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
{
port.numAccessesInDTLB--;
M5_VAR_USED unsigned int expected_fragment_index =
GEM5_VAR_USED unsigned int expected_fragment_index =
numTranslatedFragments;
numInTranslationFragments--;
@@ -475,7 +475,7 @@ LSQ::SplitDataRequest::makeFragmentRequests()
for (unsigned int fragment_index = 0; fragment_index < numFragments;
fragment_index++)
{
M5_VAR_USED bool is_last_fragment = false;
GEM5_VAR_USED bool is_last_fragment = false;
if (fragment_addr == base_addr) {
/* First fragment */

View File

@@ -707,7 +707,7 @@ DefaultCommit<Impl>::tick()
// will be active.
_nextStatus = Active;
M5_VAR_USED const DynInstPtr &inst = rob->readHeadInst(tid);
GEM5_VAR_USED const DynInstPtr &inst = rob->readHeadInst(tid);
DPRINTF(Commit,"[tid:%i] Instruction [sn:%llu] PC %s is head of"
" ROB and ready to commit\n",

View File

@@ -683,7 +683,7 @@ LSQ<Impl>::pushRequest(const DynInstPtr& inst, bool isLoad, uint8_t *data,
// This comming request can be either load, store or atomic.
// Atomic request has a corresponding pointer to its atomic memory
// operation
M5_VAR_USED bool isAtomic = !isLoad && amo_op;
GEM5_VAR_USED bool isAtomic = !isLoad && amo_op;
ThreadID tid = cpu->contextToThread(inst->contextId());
auto cacheLineSize = cpu->cacheLineSize();

View File

@@ -270,7 +270,7 @@ MemDepUnit<MemDepPred, Impl>::insert(const DynInstPtr &inst)
} else {
// Otherwise make the instruction dependent on the store/barrier.
DPRINTF(MemDepUnit, "Adding to dependency list\n");
for (M5_VAR_USED auto producing_store : producing_stores)
for (GEM5_VAR_USED auto producing_store : producing_stores)
DPRINTF(MemDepUnit, "\tinst PC %s is dependent on [sn:%lli].\n",
inst->pcState(), producing_store);

View File

@@ -111,7 +111,7 @@ BPredUnit::drainSanityCheck() const
{
// We shouldn't have any outstanding requests when we resume from
// a drained system.
for (M5_VAR_USED const auto& ph : predHist)
for (GEM5_VAR_USED const auto& ph : predHist)
assert(ph.empty());
}

View File

@@ -166,7 +166,7 @@ void
TimingSimpleCPU::switchOut()
{
SimpleExecContext& t_info = *threadInfo[curThread];
M5_VAR_USED SimpleThread* thread = t_info.thread;
GEM5_VAR_USED SimpleThread* thread = t_info.thread;
// hardware transactional memory
// Cannot switch out the CPU in the middle of a transaction
@@ -937,7 +937,7 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
// hardware transactional memory
SimpleExecContext *t_info = threadInfo[curThread];
M5_VAR_USED const bool is_htm_speculative =
GEM5_VAR_USED const bool is_htm_speculative =
t_info->inHtmTransactionalState();
// received a response from the dcache: complete the load or store

View File

@@ -243,7 +243,7 @@ MemTest::tick()
if (cmd < percentReads) {
// start by ensuring there is a reference value if we have not
// seen this address before
M5_VAR_USED uint8_t ref_data = 0;
GEM5_VAR_USED uint8_t ref_data = 0;
auto ref = referenceData.find(req->getPaddr());
if (ref == referenceData.end()) {
referenceData[req->getPaddr()] = 0;

View File

@@ -811,7 +811,7 @@ TraceCPU::ElasticDataGen::printReadyList()
DPRINTF(TraceCPUData, "Printing readyList:\n");
while (itr != readyList.end()) {
auto graph_itr = depGraph.find(itr->seqNum);
M5_VAR_USED GraphNode* node_ptr = graph_itr->second;
GEM5_VAR_USED GraphNode* node_ptr = graph_itr->second;
DPRINTFR(TraceCPUData, "\t%lld(%s), %lld\n", itr->seqNum,
node_ptr->typeToStr(), itr->execTick);
itr++;
@@ -1322,7 +1322,7 @@ TraceCPU::ElasticDataGen::GraphNode::removeDepOnInst(NodeSeqNum done_seq_num)
// If it is not an rob dependency then it must be a register dependency
// If the register dependency is not found, it violates an assumption
// and must be caught by assert.
M5_VAR_USED bool regdep_found = removeRegDep(done_seq_num);
GEM5_VAR_USED bool regdep_found = removeRegDep(done_seq_num);
assert(regdep_found);
}
// Return true if the node is dependency free

View File

@@ -92,7 +92,7 @@ AMDGPUDevice::getAddrRanges() const
Tick
AMDGPUDevice::readConfig(PacketPtr pkt)
{
M5_VAR_USED int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
GEM5_VAR_USED int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
DPRINTF(AMDGPUDevice, "Read Config: from offset: %#x size: %#x "
"data: %#x\n", offset, pkt->getSize(), config.data[offset]);
@@ -102,7 +102,7 @@ AMDGPUDevice::readConfig(PacketPtr pkt)
Tick
AMDGPUDevice::writeConfig(PacketPtr pkt)
{
M5_VAR_USED int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
GEM5_VAR_USED int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
DPRINTF(AMDGPUDevice, "Write Config: from offset: %#x size: %#x "
"data: %#x\n", offset, pkt->getSize(),
pkt->getUintX(ByteOrder::little));

View File

@@ -390,7 +390,7 @@ GicV2::writeDistributor(PacketPtr pkt)
const ContextID ctx = pkt->req->contextId();
const size_t data_sz = pkt->getSize();
M5_VAR_USED uint32_t pkt_data;
GEM5_VAR_USED uint32_t pkt_data;
switch (data_sz)
{
case 1:

View File

@@ -127,7 +127,7 @@ HSAPacketProcessor::write(Packet *pkt)
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
// TODO: How to get pid??
M5_VAR_USED Addr daddr = pkt->getAddr() - pioAddr;
GEM5_VAR_USED Addr daddr = pkt->getAddr() - pioAddr;
DPRINTF(HSAPacketProcessor,
"%s: write of size %d to reg-offset %d (0x%x)\n",
@@ -265,7 +265,7 @@ void
HSAPacketProcessor::CmdQueueCmdDmaEvent::process()
{
uint32_t rl_idx = series_ctx->rl_idx;
M5_VAR_USED AQLRingBuffer *aqlRingBuffer =
GEM5_VAR_USED AQLRingBuffer *aqlRingBuffer =
hsaPP->regdQList[rl_idx]->qCntxt.aqlBuf;
HSAQueueDescriptor* qDesc =
hsaPP->regdQList[rl_idx]->qCntxt.qDesc;
@@ -608,7 +608,7 @@ HSAPacketProcessor::getCommandsFromHost(int pid, uint32_t rl_idx)
void
HSAPacketProcessor::displayQueueDescriptor(int pid, uint32_t rl_idx)
{
M5_VAR_USED HSAQueueDescriptor* qDesc = regdQList[rl_idx]->qCntxt.qDesc;
GEM5_VAR_USED HSAQueueDescriptor* qDesc = regdQList[rl_idx]->qCntxt.qDesc;
DPRINTF(HSAPacketProcessor,
"%s: pid[%d], basePointer[0x%lx], dBPointer[0x%lx], "
"writeIndex[0x%x], readIndex[0x%x], size(bytes)[0x%x]\n",

View File

@@ -113,7 +113,7 @@ HWScheduler::registerNewQueue(uint64_t hostReadIndexPointer,
// Check if this newly created queue can be directly mapped
// to registered queue list
M5_VAR_USED bool register_q = mapQIfSlotAvlbl(queue_id, aql_buf, q_desc);
GEM5_VAR_USED bool register_q = mapQIfSlotAvlbl(queue_id, aql_buf, q_desc);
schedWakeup();
DPRINTF(HSAPacketProcessor,
"%s: offset = %p, qID = %d, is_regd = %s, AL size %d\n",

View File

@@ -219,7 +219,7 @@ Device::read(PacketPtr pkt)
prepareRead(cpu, index);
M5_VAR_USED uint64_t value = 0;
GEM5_VAR_USED uint64_t value = 0;
if (pkt->getSize() == 4) {
uint32_t reg = regData32(raddr);
pkt->setLE(reg);

View File

@@ -251,7 +251,7 @@ TCPIface::connect()
TCPIface::~TCPIface()
{
M5_VAR_USED int ret;
GEM5_VAR_USED int ret;
ret = close(sock);
assert(ret == 0);

View File

@@ -306,19 +306,19 @@ CopyEngine::write(PacketPtr pkt)
///
if (size == sizeof(uint64_t)) {
M5_VAR_USED uint64_t val = pkt->getLE<uint64_t>();
GEM5_VAR_USED uint64_t val = pkt->getLE<uint64_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint32_t)) {
M5_VAR_USED uint32_t val = pkt->getLE<uint32_t>();
GEM5_VAR_USED uint32_t val = pkt->getLE<uint32_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint16_t)) {
M5_VAR_USED uint16_t val = pkt->getLE<uint16_t>();
GEM5_VAR_USED uint16_t val = pkt->getLE<uint16_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint8_t)) {
M5_VAR_USED uint8_t val = pkt->getLE<uint8_t>();
GEM5_VAR_USED uint8_t val = pkt->getLE<uint8_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else {

View File

@@ -65,7 +65,7 @@ PciVirtIO::~PciVirtIO()
Tick
PciVirtIO::read(PacketPtr pkt)
{
M5_VAR_USED const unsigned size(pkt->getSize());
GEM5_VAR_USED const unsigned size(pkt->getSize());
int bar;
Addr offset;
if (!getBAR(pkt->getAddr(), bar, offset))
@@ -146,7 +146,7 @@ PciVirtIO::read(PacketPtr pkt)
Tick
PciVirtIO::write(PacketPtr pkt)
{
M5_VAR_USED const unsigned size(pkt->getSize());
GEM5_VAR_USED const unsigned size(pkt->getSize());
int bar;
Addr offset;
if (!getBAR(pkt->getAddr(), bar, offset))

View File

@@ -352,7 +352,7 @@ ComputeUnit::startWavefront(Wavefront *w, int waveId, LdsChunk *ldsChunk,
// set the wavefront context to have a pointer to this section of the LDS
w->ldsChunk = ldsChunk;
M5_VAR_USED int32_t refCount =
GEM5_VAR_USED int32_t refCount =
lds.increaseRefCounter(w->dispatchId, w->wgId);
DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n",
cu_id, w->wgId, refCount);
@@ -956,7 +956,7 @@ ComputeUnit::DataPort::recvReqRetry()
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front().first;
M5_VAR_USED GPUDynInstPtr gpuDynInst = retries.front().second;
GEM5_VAR_USED GPUDynInstPtr gpuDynInst = retries.front().second;
DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n",
computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
pkt->req->getPaddr());
@@ -990,7 +990,7 @@ ComputeUnit::SQCPort::recvReqRetry()
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front().first;
M5_VAR_USED Wavefront *wavefront = retries.front().second;
GEM5_VAR_USED Wavefront *wavefront = retries.front().second;
DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n",
computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
pkt->req->getPaddr());
@@ -1402,7 +1402,7 @@ ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt)
DTLBPort::SenderState *sender_state =
safe_cast<DTLBPort::SenderState*>(translation_state->saved);
M5_VAR_USED Wavefront *w =
GEM5_VAR_USED Wavefront *w =
computeUnit->wfList[sender_state->_gpuDynInst->simdId]
[sender_state->_gpuDynInst->wfSlotId];
@@ -1571,7 +1571,7 @@ ComputeUnit::DataPort::processMemReqEvent(PacketPtr pkt)
{
SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
M5_VAR_USED ComputeUnit *compute_unit = computeUnit;
GEM5_VAR_USED ComputeUnit *compute_unit = computeUnit;
if (!(sendTimingReq(pkt))) {
retries.push_back(std::make_pair(pkt, gpuDynInst));
@@ -1600,7 +1600,7 @@ ComputeUnit::ScalarDataPort::MemReqEvent::process()
{
SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
M5_VAR_USED ComputeUnit *compute_unit = scalarDataPort.computeUnit;
GEM5_VAR_USED ComputeUnit *compute_unit = scalarDataPort.computeUnit;
if (!(scalarDataPort.sendTimingReq(pkt))) {
scalarDataPort.retries.push_back(pkt);
@@ -1640,7 +1640,7 @@ ComputeUnit::DTLBPort::recvReqRetry()
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front();
M5_VAR_USED Addr vaddr = pkt->req->getVaddr();
GEM5_VAR_USED Addr vaddr = pkt->req->getVaddr();
DPRINTF(GPUTLB, "CU%d: retrying D-translaton for address%#x", vaddr);
if (!sendTimingReq(pkt)) {
@@ -1679,7 +1679,7 @@ ComputeUnit::ScalarDTLBPort::recvTimingResp(PacketPtr pkt)
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
delete pkt->senderState;
M5_VAR_USED Wavefront *w = gpuDynInst->wavefront();
GEM5_VAR_USED Wavefront *w = gpuDynInst->wavefront();
DPRINTF(GPUTLB, "CU%d: WF[%d][%d][wv=%d]: scalar DTLB port received "
"translation: PA %#x -> %#x\n", computeUnit->cu_id, w->simdId,
@@ -1718,7 +1718,7 @@ ComputeUnit::ScalarDTLBPort::recvTimingResp(PacketPtr pkt)
bool
ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt)
{
M5_VAR_USED Addr line = pkt->req->getPaddr();
GEM5_VAR_USED Addr line = pkt->req->getPaddr();
DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
computeUnit->cu_id, pkt->req->getVaddr(), line);
@@ -1784,7 +1784,7 @@ ComputeUnit::ITLBPort::recvReqRetry()
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front();
M5_VAR_USED Addr vaddr = pkt->req->getVaddr();
GEM5_VAR_USED Addr vaddr = pkt->req->getVaddr();
DPRINTF(GPUTLB, "CU%d: retrying I-translaton for address%#x", vaddr);
if (!sendTimingReq(pkt)) {
@@ -2037,7 +2037,7 @@ ComputeUnit::LDSPort::sendTimingReq(PacketPtr pkt)
dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState);
fatal_if(!sender_state, "packet without a valid sender state");
M5_VAR_USED GPUDynInstPtr gpuDynInst = sender_state->getMemInst();
GEM5_VAR_USED GPUDynInstPtr gpuDynInst = sender_state->getMemInst();
if (isStalled()) {
fatal_if(retries.empty(), "must have retries waiting to be stalled");

View File

@@ -582,7 +582,7 @@ GPUComputeDriver::ioctl(ThreadContext *tc, unsigned req, Addr ioc_buf)
assert(isdGPU);
assert((args->va_addr % TheISA::PageBytes) == 0);
M5_VAR_USED Addr mmap_offset = 0;
GEM5_VAR_USED Addr mmap_offset = 0;
Request::CacheCoherenceFlags mtype = defaultMtype;
Addr pa_addr = 0;

View File

@@ -758,7 +758,7 @@ ScheduleStage::reserveResources()
// that we've reserved a global and local memory unit. Thus,
// we need to mark the latter execution unit as not available.
if (execUnitIds.size() > 1) {
M5_VAR_USED int lm_exec_unit = wf->localMem;
GEM5_VAR_USED int lm_exec_unit = wf->localMem;
assert(toExecute.dispatchStatus(lm_exec_unit)
== SKIP);
}
@@ -767,7 +767,7 @@ ScheduleStage::reserveResources()
// Verify the GM pipe for this wave is ready to execute
// and the wave in the GM pipe is the same as the wave
// in the LM pipe
M5_VAR_USED int gm_exec_unit = wf->globalMem;
GEM5_VAR_USED int gm_exec_unit = wf->globalMem;
assert(wf->wfDynId == toExecute
.readyInst(gm_exec_unit)->wfDynId);
assert(toExecute.dispatchStatus(gm_exec_unit)

View File

@@ -73,7 +73,7 @@ Linux::openSpecialFile(std::string path, Process *process,
if (matched) {
FILE *f = tmpfile();
int fd = fileno(f);
M5_VAR_USED size_t ret = fwrite(data.c_str(), 1, data.size(), f);
GEM5_VAR_USED size_t ret = fwrite(data.c_str(), 1, data.size(), f);
assert(ret == data.size());
rewind(f);
return fd;

View File

@@ -35,7 +35,7 @@
void
SkipFuncBase::process(ThreadContext *tc)
{
M5_VAR_USED TheISA::PCState oldPC = tc->pcState();
GEM5_VAR_USED TheISA::PCState oldPC = tc->pcState();
returnFromFuncIn(tc);

View File

@@ -229,7 +229,7 @@ SimpleCache::handleResponse(PacketPtr pkt)
DPRINTF(SimpleCache, "Copying data from new packet to old\n");
// We had to upgrade a previous packet. We can functionally deal with
// the cache access now. It better be a hit.
M5_VAR_USED bool hit = accessFunctional(originalPacket);
GEM5_VAR_USED bool hit = accessFunctional(originalPacket);
panic_if(!hit, "Should always hit after inserting");
originalPacket->makeResponse();
delete pkt; // We may need to delay this, I'm not sure.

View File

@@ -896,7 +896,7 @@ BaseCache::updateCompressionData(CacheBlk *&blk, const uint64_t* data,
// Get previous compressed size
CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
M5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
GEM5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
// If compressed size didn't change enough to modify its co-allocatability
// there is nothing to do. Otherwise we may be facing a data expansion
@@ -2418,7 +2418,7 @@ BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
if (cache->system->bypassCaches()) {
// Just forward the packet if caches are disabled.
// @todo This should really enqueue the packet rather
M5_VAR_USED bool success = cache->memSidePort.sendTimingReq(pkt);
GEM5_VAR_USED bool success = cache->memSidePort.sendTimingReq(pkt);
assert(success);
return true;
} else if (tryTiming(pkt)) {

View File

@@ -449,7 +449,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// this express snoop travels towards the memory, and at
// every crossbar it is snooped upwards thus reaching
// every cache in the system
M5_VAR_USED bool success = memSidePort.sendTimingReq(snoop_pkt);
GEM5_VAR_USED bool success = memSidePort.sendTimingReq(snoop_pkt);
// express snoops always succeed
assert(success);
@@ -992,7 +992,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// responds in atomic mode, so remember a few things about the
// original packet up front
bool invalidate = pkt->isInvalidate();
M5_VAR_USED bool needs_writable = pkt->needsWritable();
GEM5_VAR_USED bool needs_writable = pkt->needsWritable();
// at the moment we could get an uncacheable write which does not
// have the invalidate flag, and we need a suitable way of dealing
@@ -1394,7 +1394,7 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
// prefetchSquash first may result in the MSHR being
// prematurely deallocated.
if (snoop_pkt.cacheResponding()) {
M5_VAR_USED auto r = outstandingSnoop.insert(snoop_pkt.req);
GEM5_VAR_USED auto r = outstandingSnoop.insert(snoop_pkt.req);
assert(r.second);
// if we are getting a snoop response with no sharers it

View File

@@ -141,7 +141,7 @@ FrequentValues::decompress(const CompressionData* comp_data, uint64_t* data)
// its corresponding value, in order to make life easier we
// search for the value and verify that the stored code
// matches the table's
M5_VAR_USED const Encoder::Code code =
GEM5_VAR_USED const Encoder::Code code =
encoder.encode(comp_chunk.value);
// Either the value will be found and the codes match, or the

View File

@@ -110,7 +110,7 @@ void
FALRU::invalidate(CacheBlk *blk)
{
// Erase block entry reference in the hash table
M5_VAR_USED auto num_erased =
GEM5_VAR_USED auto num_erased =
tagHash.erase(std::make_pair(blk->getTag(), blk->isSecure()));
// Sanity check; only one block reference should be erased

View File

@@ -638,7 +638,7 @@ CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID cpu_side_port_id)
*memSidePorts[dest_port_id]);
}
M5_VAR_USED bool success =
GEM5_VAR_USED bool success =
memSidePorts[dest_port_id]->sendTimingSnoopResp(pkt);
pktCount[cpu_side_port_id][dest_port_id]++;
pktSize[cpu_side_port_id][dest_port_id] += pkt_size;
@@ -858,7 +858,7 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
// if this is the destination of the operation, the xbar
// sends the responce to the cache clean operation only
// after having encountered the cache clean request
M5_VAR_USED auto ret = outstandingCMO.emplace(pkt->id, nullptr);
GEM5_VAR_USED auto ret = outstandingCMO.emplace(pkt->id, nullptr);
// in atomic mode we know that the WriteClean packet should
// precede the clean request
assert(ret.second);

View File

@@ -169,7 +169,7 @@ DRAMSim2Wrapper::canAccept() const
void
DRAMSim2Wrapper::enqueue(bool is_write, uint64_t addr)
{
    // Hand one transaction to the DRAMSim2 backend. Presumably the caller
    // has already checked canAccept(), so the insertion must succeed —
    // TODO confirm against callers. GEM5_VAR_USED keeps 'success'
    // referenced in builds where assert() compiles away.
    GEM5_VAR_USED bool success = dramsim->addTransaction(is_write, addr);
    assert(success);
}

View File

@@ -123,7 +123,7 @@ DRAMsim3Wrapper::canAccept(uint64_t addr, bool is_write) const
void
DRAMsim3Wrapper::enqueue(uint64_t addr, bool is_write)
{
    // Hand one transaction to the DRAMsim3 backend. Presumably the caller
    // has already checked canAccept(addr, is_write), so the insertion must
    // succeed — TODO confirm against callers. GEM5_VAR_USED keeps
    // 'success' referenced in builds where assert() compiles away.
    GEM5_VAR_USED bool success = dramsim->AddTransaction(addr, is_write);
    assert(success);
}

View File

@@ -97,7 +97,7 @@ Tick
StubSlavePort::recvAtomic(PacketPtr packet)
{
if (Debug::ExternalPort) {
M5_VAR_USED unsigned int size = packet->getSize();
GEM5_VAR_USED unsigned int size = packet->getSize();
DPRINTF(ExternalPort, "StubSlavePort: recvAtomic a: 0x%x size: %d"
" data: ...\n", packet->getAddr(), size);

View File

@@ -78,7 +78,7 @@ EmulationPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
new_vaddr, size);
while (size > 0) {
M5_VAR_USED auto new_it = pTable.find(new_vaddr);
GEM5_VAR_USED auto new_it = pTable.find(new_vaddr);
auto old_it = pTable.find(vaddr);
assert(old_it != pTable.end() && new_it == pTable.end());

View File

@@ -128,7 +128,7 @@ GarnetNetwork::init()
for (std::vector<Router*>::const_iterator i= m_routers.begin();
i != m_routers.end(); ++i) {
Router* router = safe_cast<Router*>(*i);
M5_VAR_USED int router_id =
GEM5_VAR_USED int router_id =
fault_model->declare_router(router->get_num_inports(),
router->get_num_outports(),
router->get_vc_per_vnet(),

View File

@@ -201,7 +201,7 @@ RoutingUnit::outportComputeXY(RouteInfo route,
{
PortDirection outport_dirn = "Unknown";
M5_VAR_USED int num_rows = m_router->get_net_ptr()->getNumRows();
GEM5_VAR_USED int num_rows = m_router->get_net_ptr()->getNumRows();
int num_cols = m_router->get_net_ptr()->getNumCols();
assert(num_rows > 0 && num_cols > 0);

View File

@@ -408,8 +408,8 @@ void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
uint64_t warmedUpBlocks = 0;
M5_VAR_USED uint64_t totalBlocks = (uint64_t)m_cache_num_sets *
(uint64_t)m_cache_assoc;
GEM5_VAR_USED uint64_t totalBlocks = (uint64_t)m_cache_num_sets *
(uint64_t)m_cache_assoc;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {

View File

@@ -150,7 +150,7 @@ template<class ENTRY>
template<class ENTRY>
inline void
PerfectCacheMemory<ENTRY>::deallocate(Addr address)
{
    // Remove the entry for this cache line. Exactly one mapping must have
    // existed for the line address; GEM5_VAR_USED keeps 'num_erased'
    // referenced in builds where assert() compiles away.
    GEM5_VAR_USED auto num_erased = m_map.erase(makeLineAddress(address));
    assert(num_erased == 1);
}

View File

@@ -532,7 +532,7 @@ GPUCoalescer::hitCallback(CoalescedRequest* crequest,
{
PacketPtr pkt = crequest->getFirstPkt();
Addr request_address = pkt->getAddr();
M5_VAR_USED Addr request_line_address = makeLineAddress(request_address);
GEM5_VAR_USED Addr request_line_address = makeLineAddress(request_address);
RubyRequestType type = crequest->getRubyType();

View File

@@ -206,7 +206,7 @@ RubyPort::PioResponsePort::recvTimingReq(PacketPtr pkt)
if (it->contains(pkt->getAddr())) {
// generally it is not safe to assume success here as
// the port could be blocked
M5_VAR_USED bool success =
GEM5_VAR_USED bool success =
ruby_port->request_ports[i]->sendTimingReq(pkt);
assert(success);
return true;
@@ -373,7 +373,7 @@ RubyPort::MemResponsePort::recvFunctional(PacketPtr pkt)
{
DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());
M5_VAR_USED RubyPort *rp = static_cast<RubyPort *>(&owner);
GEM5_VAR_USED RubyPort *rp = static_cast<RubyPort *>(&owner);
RubySystem *rs = rp->m_ruby_system;
// Check for pio requests and directly send them to the dedicated
@@ -600,7 +600,7 @@ RubyPort::PioResponsePort::getAddrRanges() const
ranges.splice(ranges.begin(),
ruby_port->request_ports[i]->getAddrRanges());
}
for (M5_VAR_USED const auto &r : ranges)
for (GEM5_VAR_USED const auto &r : ranges)
DPRINTF(RubyPort, "%s\n", r.to_string());
return ranges;
}

View File

@@ -691,7 +691,7 @@ RubySystem::functionalWrite(PacketPtr pkt)
DPRINTF(RubySystem, "Functional Write request for %#x\n", addr);
M5_VAR_USED uint32_t num_functional_writes = 0;
GEM5_VAR_USED uint32_t num_functional_writes = 0;
// Only send functional requests within the same network.
assert(requestorToNetwork.count(pkt->requestorId()));

View File

@@ -61,7 +61,7 @@ class PeekStatementAST(StatementAST):
code('''
{
// Declare message
M5_VAR_USED const $mtid* in_msg_ptr;
GEM5_VAR_USED const $mtid* in_msg_ptr;
in_msg_ptr = dynamic_cast<const $mtid *>(($qcode).${{self.method}}());
if (in_msg_ptr == NULL) {
// If the cast fails, this is the wrong inport (wrong message type).

View File

@@ -601,7 +601,7 @@ void
$c_ident::initNetQueues()
{
MachineType machine_type = string_to_MachineType("${{self.ident}}");
M5_VAR_USED int base = MachineType_base_number(machine_type);
GEM5_VAR_USED int base = MachineType_base_number(machine_type);
''')
code.indent()

View File

@@ -766,7 +766,7 @@ class MetaSimObject(type):
# method, or the Dummy one. Either an implementation is
# mandantory since this was shunted off to the dummy class, or
# one is optional which will override this weak version.
code('M5_VAR_USED ${{cls.cxx_class}} *')
code('GEM5_VAR_USED ${{cls.cxx_class}} *')
code('Dummy${cls}Shunt<${{cls.cxx_class}}>::Params::create() const')
code('{')
code(' return Dummy${cls}Shunt<${{cls.cxx_class}}>::')

View File

@@ -96,7 +96,7 @@ callFrom(ThreadContext *tc, typename ABI::State &state,
template <typename ABI, typename Ret, typename ...Args>
static void
dumpArgsFrom(std::ostream &os, M5_VAR_USED ThreadContext *tc,
dumpArgsFrom(std::ostream &os, GEM5_VAR_USED ThreadContext *tc,
typename ABI::State &state)
{
int count = 0;

View File

@@ -109,7 +109,8 @@ prepareForResult(ThreadContext *tc, typename ABI::State &state)
template <typename ABI, typename ...Args>
static inline void
prepareForArguments(M5_VAR_USED ThreadContext *tc, typename ABI::State &state)
prepareForArguments(GEM5_VAR_USED ThreadContext *tc,
typename ABI::State &state)
{
M5_FOR_EACH_IN_PACK(Preparer<ABI, Argument, Args>::prepare(tc, state));
}

View File

@@ -179,7 +179,7 @@ System::Threads::quiesce(ContextID id)
{
auto &t = thread(id);
# if THE_ISA != NULL_ISA
M5_VAR_USED BaseCPU *cpu = t.context->getCpuPtr();
GEM5_VAR_USED BaseCPU *cpu = t.context->getCpuPtr();
DPRINTFS(Quiesce, cpu, "quiesce()\n");
# endif
t.quiesce();
@@ -255,7 +255,7 @@ System::System(const Params &p)
}
// Get the generic system requestor IDs
M5_VAR_USED RequestorID tmp_id;
GEM5_VAR_USED RequestorID tmp_id;
tmp_id = getRequestorId(this, "writebacks");
assert(tmp_id == Request::wbRequestorId);
tmp_id = getRequestorId(this, "functional");