misc: Update attribute syntax, and reorganize compiler.hh.

This change replaces the __attribute__ syntax with the now standard [[]]
syntax. It also reorganizes compiler.hh so that every special macro has
explanatory text saying what it does. Each attribute that has a standard
version now uses that version when available, with a comment noting which
version of C++ made it standard.

Also, the rules for where [[]] style attributes may be placed are a
little stricter than those for the old school __attribute__ style. The
uses of the attribute macros were updated to satisfy these new, stricter
requirements.

Change-Id: Iace44306a534111f1c38b9856dc9e88cd9b49d2a
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/35219
Reviewed-by: Daniel Carvalho <odanrc@yahoo.com.br>
Maintainer: Gabe Black <gabeblack@google.com>
Tested-by: kokoro <noreply+kokoro@google.com>
This commit is contained in:
Gabe Black
2020-09-26 18:26:02 -07:00
parent 3c31a214b6
commit b877efa6d4
78 changed files with 242 additions and 208 deletions

View File

@@ -517,7 +517,7 @@ ArmFault::invoke(ThreadContext *tc, const StaticInstPtr &inst)
saved_cpsr.v = tc->readCCReg(CCREG_V);
saved_cpsr.ge = tc->readCCReg(CCREG_GE);
Addr curPc M5_VAR_USED = tc->pcState().pc();
M5_VAR_USED Addr curPc = tc->pcState().pc();
ITSTATE it = tc->pcState().itstate();
saved_cpsr.it2 = it.top6;
saved_cpsr.it1 = it.bottom2;
@@ -525,7 +525,7 @@ ArmFault::invoke(ThreadContext *tc, const StaticInstPtr &inst)
// if we have a valid instruction then use it to annotate this fault with
// extra information. This is used to generate the correct fault syndrome
// information
ArmStaticInst *arm_inst M5_VAR_USED = instrAnnotate(inst);
M5_VAR_USED ArmStaticInst *arm_inst = instrAnnotate(inst);
// Ensure Secure state if initially in Monitor mode
if (have_security && saved_cpsr.mode == MODE_MON) {
@@ -703,7 +703,7 @@ ArmFault::invoke64(ThreadContext *tc, const StaticInstPtr &inst)
// If we have a valid instruction then use it to annotate this fault with
// extra information. This is used to generate the correct fault syndrome
// information
ArmStaticInst *arm_inst M5_VAR_USED = instrAnnotate(inst);
M5_VAR_USED ArmStaticInst *arm_inst = instrAnnotate(inst);
// Set PC to start of exception handler
Addr new_pc = purifyTaggedAddr(vec_address, tc, toEL, true);
@@ -755,7 +755,7 @@ Reset::getVector(ThreadContext *tc)
Addr base;
// Check for invalid modes
CPSR M5_VAR_USED cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
M5_VAR_USED CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
assert(ArmSystem::haveSecurity(tc) || cpsr.mode != MODE_MON);
assert(ArmSystem::haveVirtualization(tc) || cpsr.mode != MODE_HYP);
@@ -1069,7 +1069,7 @@ AbortFault<T>::invoke(ThreadContext *tc, const StaticInstPtr &inst)
// See ARM ARM B3-1416
bool override_LPAE = false;
TTBCR ttbcr_s = tc->readMiscReg(MISCREG_TTBCR_S);
TTBCR M5_VAR_USED ttbcr_ns = tc->readMiscReg(MISCREG_TTBCR_NS);
M5_VAR_USED TTBCR ttbcr_ns = tc->readMiscReg(MISCREG_TTBCR_NS);
if (ttbcr_s.eae) {
override_LPAE = true;
} else {

View File

@@ -561,7 +561,7 @@ VldSingleOp::VldSingleOp(const char *mnem, ExtMachInst machInst,
unsigned eBytes = (1 << size);
unsigned loadSize = eBytes * elems;
unsigned loadRegs M5_VAR_USED =
M5_VAR_USED unsigned loadRegs =
(loadSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);
assert(loadRegs > 0 && loadRegs <= 4);
@@ -925,7 +925,7 @@ VstSingleOp::VstSingleOp(const char *mnem, ExtMachInst machInst,
unsigned eBytes = (1 << size);
unsigned storeSize = eBytes * elems;
unsigned storeRegs M5_VAR_USED =
M5_VAR_USED unsigned storeRegs =
(storeSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);
assert(storeRegs > 0 && storeRegs <= 4);

View File

@@ -83,16 +83,16 @@ let {{
"logic": '0'
}
immOp2 = "uint64_t secOp M5_VAR_USED = imm;"
sRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
immOp2 = "M5_VAR_USED uint64_t secOp = imm;"
sRegOp2 = "M5_VAR_USED uint64_t secOp = " + \
"shiftReg64(Op264, shiftAmt, shiftType, intWidth);"
eRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
eRegOp2 = "M5_VAR_USED uint64_t secOp = " + \
"extendReg64(Op264, extendType, shiftAmt, intWidth);"
def buildDataWork(mnem, code, flagType, suffix, buildCc, buildNonCc,
base, templateBase):
code = '''
uint64_t resTemp M5_VAR_USED = 0;
M5_VAR_USED uint64_t resTemp = 0;
''' + code
ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
Name = mnem.capitalize() + suffix
@@ -576,9 +576,9 @@ let {{
def condCompCode(flagType, op, imm):
ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
opDecl = "uint64_t secOp M5_VAR_USED = imm;"
opDecl = "M5_VAR_USED uint64_t secOp = imm;"
if not imm:
opDecl = "uint64_t secOp M5_VAR_USED = Op264;"
opDecl = "M5_VAR_USED uint64_t secOp = Op264;"
return opDecl + '''
if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
uint64_t resTemp = Op164 ''' + op + ''' secOp;

View File

@@ -461,7 +461,7 @@ let {{
exec_output = ""
singleSimpleCode = vfpEnabledCheckCode + '''
FPSCR fpscr M5_VAR_USED = (FPSCR) FpscrExc;
M5_VAR_USED FPSCR fpscr = (FPSCR) FpscrExc;
FpDest = %(op)s;
'''
singleCode = singleSimpleCode + '''
@@ -482,7 +482,7 @@ let {{
"%(func)s, fpscr.fz, fpscr.dn, fpscr.rMode)"
singleUnaryOp = "unaryOp(fpscr, FpOp1, %(func)s, fpscr.fz, fpscr.rMode)"
doubleCode = vfpEnabledCheckCode + '''
FPSCR fpscr M5_VAR_USED = (FPSCR) FpscrExc;
M5_VAR_USED FPSCR fpscr = (FPSCR) FpscrExc;
double dest = %(op)s;
FpDestP0_uw = dblLow(dest);
FpDestP1_uw = dblHi(dest);

View File

@@ -201,7 +201,7 @@ let {{
accEpilogCode = None
# Code that actually handles the access
if self.flavor in ("dprefetch", "iprefetch", "mprefetch"):
accCode = 'uint64_t temp M5_VAR_USED = Mem%s;'
accCode = 'M5_VAR_USED uint64_t temp = Mem%s;'
elif self.flavor == "fp":
accEpilogCode = '''
ArmISA::ISA::zeroSveVecRegUpperPart(AA64FpDest,

View File

@@ -128,7 +128,7 @@ let {{
bitMask = (bitMask >> imm1) | (bitMask << (intWidth - imm1));
diff += intWidth;
}
uint64_t topBits M5_VAR_USED = ~mask(diff+1);
M5_VAR_USED uint64_t topBits = ~mask(diff+1);
uint64_t result = imm1 == 0 ? Op164 :
(Op164 >> imm1) | (Op164 << (intWidth - imm1));
result &= bitMask;

View File

@@ -2703,7 +2703,7 @@ let {{
CondCodesC = !destPred.lastActive(GpOp, eCount);
CondCodesV = 0;'''
extraPrologCode = '''
auto& destPred M5_VAR_USED = PDest;'''
M5_VAR_USED auto& destPred = PDest;'''
baseClass = ('SvePredUnaryWImplicitSrcOp' if predType == PredType.NONE
else 'SvePredUnaryWImplicitSrcPredOp')
iop = InstObjParams(name, 'Sve' + Name, baseClass,
@@ -2722,7 +2722,7 @@ let {{
global header_output, exec_output, decoders
code = sveEnabledCheckCode + op
extraPrologCode = '''
auto& destPred M5_VAR_USED = Ffr;'''
M5_VAR_USED auto& destPred = Ffr;'''
baseClass = ('SveWImplicitSrcDstOp' if isSetFfr
else 'SvePredUnaryWImplicitDstOp')
iop = InstObjParams(name, 'Sve' + Name, baseClass,

View File

@@ -1117,7 +1117,7 @@ def template LoadRegConstructor {{
(IntRegIndex)_index)
{
%(constructor)s;
bool conditional M5_VAR_USED = false;
M5_VAR_USED bool conditional = false;
if (!(condCode == COND_AL || condCode == COND_UC)) {
conditional = true;
for (int x = 0; x < _numDestRegs; x++) {
@@ -1183,7 +1183,7 @@ def template LoadImmConstructor {{
(IntRegIndex)_dest, (IntRegIndex)_base, _add, _imm)
{
%(constructor)s;
bool conditional M5_VAR_USED = false;
M5_VAR_USED bool conditional = false;
if (!(condCode == COND_AL || condCode == COND_UC)) {
conditional = true;
for (int x = 0; x < _numDestRegs; x++) {

View File

@@ -142,7 +142,7 @@ def template SveContigLoadExecute {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
@@ -176,7 +176,7 @@ def template SveContigLoadInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
@@ -200,7 +200,7 @@ def template SveContigLoadCompleteAcc {{
Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt,
ExecContext *xc, Trace::InstRecord *traceData) const
{
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
@@ -229,7 +229,7 @@ def template SveContigStoreExecute {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
@@ -266,7 +266,7 @@ def template SveContigStoreInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
@@ -308,7 +308,7 @@ def template SveLoadAndReplExecute {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
@@ -339,7 +339,7 @@ def template SveLoadAndReplInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
%(op_src_decl)s;
%(op_rd)s;
@@ -363,7 +363,7 @@ def template SveLoadAndReplCompleteAcc {{
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
@@ -547,7 +547,7 @@ def template SveGatherLoadMicroopExecute {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -595,7 +595,7 @@ def template SveGatherLoadMicroopInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
%(op_src_decl)s;
%(op_rd)s;
@@ -635,7 +635,7 @@ def template SveGatherLoadMicroopCompleteAcc {{
Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt,
ExecContext *xc, Trace::InstRecord *traceData) const
{
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -661,7 +661,7 @@ def template SveScatterStoreMicroopExecute {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -691,7 +691,7 @@ def template SveScatterStoreMicroopInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -759,7 +759,7 @@ def template SveFirstFaultWritebackMicroopExecute {{
Fault %(class_name)s%(tpl_args)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
@@ -933,7 +933,7 @@ def template SveStructLoadExecute {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
@@ -965,7 +965,7 @@ def template SveStructLoadInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
@@ -989,7 +989,7 @@ def template SveStructLoadCompleteAcc {{
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
@@ -1021,7 +1021,7 @@ def template SveStructStoreExecute {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
@@ -1058,7 +1058,7 @@ def template SveStructStoreInitiateAcc {{
{
Addr EA;
Fault fault = NoFault;
bool aarch64 M5_VAR_USED = true;
M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());

View File

@@ -66,12 +66,12 @@ class RemoteGDB : public BaseRemoteGDB
{
using BaseGdbRegCache::BaseGdbRegCache;
private:
struct {
struct M5_ATTR_PACKED {
uint32_t gpr[16];
uint32_t cpsr;
uint64_t fpr[32];
uint32_t fpscr;
} M5_ATTR_PACKED r;
} r;
public:
char *data() const { return (char *)&r; }
size_t size() const { return sizeof(r); }
@@ -88,7 +88,7 @@ class RemoteGDB : public BaseRemoteGDB
{
using BaseGdbRegCache::BaseGdbRegCache;
private:
struct {
struct M5_ATTR_PACKED {
uint64_t x[31];
uint64_t spx;
uint64_t pc;
@@ -96,7 +96,7 @@ class RemoteGDB : public BaseRemoteGDB
VecElem v[NumVecV8ArchRegs * NumVecElemPerNeonVecReg];
uint32_t fpsr;
uint32_t fpcr;
} M5_ATTR_PACKED r;
} r;
public:
char *data() const { return (char *)&r; }
size_t size() const { return sizeof(r); }

View File

@@ -695,7 +695,7 @@ TLB::checkPermissions64(TlbEntry *te, const RequestPtr &req, Mode mode,
// Cache clean operations require read permissions to the specified VA
bool is_write = !req->isCacheClean() && mode == Write;
bool is_atomic = req->isAtomic();
bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
M5_VAR_USED bool is_priv = isPriv && !(flags & UserMode);
updateMiscReg(tc, curTranType);

View File

@@ -166,7 +166,7 @@ class Template(object):
if operands.predRead:
myDict['op_decl'] += 'uint8_t _sourceIndex = 0;\n'
if operands.predWrite:
myDict['op_decl'] += 'uint8_t M5_VAR_USED _destIndex = 0;\n'
myDict['op_decl'] += 'M5_VAR_USED uint8_t _destIndex = 0;\n'
is_src = lambda op: op.is_src
is_dest = lambda op: op.is_dest

View File

@@ -145,8 +145,8 @@ Interrupts::getInterrupt()
{
assert(checkInterrupts());
StatusReg M5_VAR_USED status = tc->readMiscRegNoEffect(MISCREG_STATUS);
CauseReg M5_VAR_USED cause = tc->readMiscRegNoEffect(MISCREG_CAUSE);
M5_VAR_USED StatusReg status = tc->readMiscRegNoEffect(MISCREG_STATUS);
M5_VAR_USED CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE);
DPRINTF(Interrupt, "Interrupt! IM[7:0]=%d IP[7:0]=%d \n",
(unsigned)status.im, (unsigned)cause.ip);

View File

@@ -404,7 +404,7 @@ def template MiscExecute {{
Fault %(class_name)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
Addr EA M5_VAR_USED = 0;
M5_VAR_USED Addr EA = 0;
Fault fault = NoFault;
%(fp_enable_check)s;

View File

@@ -111,7 +111,7 @@ def template ThreadRegisterExecute {{
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
int64_t data M5_VAR_USED;
M5_VAR_USED int64_t data;
%(op_decl)s;
%(op_rd)s;

View File

@@ -109,7 +109,7 @@ def template LoadCompleteAcc {{
ExecContext *xc,
Trace::InstRecord *traceData) const
{
Addr M5_VAR_USED EA;
M5_VAR_USED Addr EA;
Fault fault = NoFault;
%(op_decl)s;

View File

@@ -49,7 +49,7 @@
namespace RiscvISA
{
const std::array<const char *, NumMiscRegs> M5_VAR_USED MiscRegNames = {{
M5_VAR_USED const std::array<const char *, NumMiscRegs> MiscRegNames = {{
[MISCREG_PRV] = "PRV",
[MISCREG_ISA] = "ISA",
[MISCREG_VENDORID] = "VENDORID",

View File

@@ -49,7 +49,7 @@ class %(class_name)s : public %(base_class)s
// Constructor.
%(class_name)s(ExtMachInst machInst);
Fault execute(ExecContext *, Trace::InstRecord *) const override;
Fault doFpOp(ExecContext *, Trace::InstRecord *) const M5_NO_INLINE;
M5_NO_INLINE Fault doFpOp(ExecContext *, Trace::InstRecord *) const;
};
}};

View File

@@ -49,7 +49,7 @@ def template MicroRegOpExecute {{
%(op_decl)s;
%(op_rd)s;
RegVal result M5_VAR_USED;
M5_VAR_USED RegVal result;
if(%(cond_check)s)
{
@@ -79,7 +79,7 @@ def template MicroRegOpImmExecute {{
%(op_decl)s;
%(op_rd)s;
RegVal result M5_VAR_USED;
M5_VAR_USED RegVal result;
if(%(cond_check)s)
{

View File

@@ -46,7 +46,7 @@
*/
namespace X86ISA
{
const Request::FlagsType M5_VAR_USED SegmentFlagMask = mask(4);
M5_VAR_USED const Request::FlagsType SegmentFlagMask = mask(4);
const int FlagShift = 4;
enum FlagBit {
CPL0FlagBit = 1,

View File

@@ -75,15 +75,15 @@ class BmpWriter : public ImgWriter
void write(std::ostream &bmp) const override;
private:
struct FileHeader {
struct M5_ATTR_PACKED FileHeader {
unsigned char magic_number[2];
uint32_t size;
uint16_t reserved1;
uint16_t reserved2;
uint32_t offset;
} M5_ATTR_PACKED;
};
struct InfoHeaderV1 { /* Aka DIB header */
struct M5_ATTR_PACKED InfoHeaderV1 { /* Aka DIB header */
uint32_t Size;
uint32_t Width;
uint32_t Height;
@@ -95,14 +95,14 @@ class BmpWriter : public ImgWriter
uint32_t YPelsPerMeter;
uint32_t ClrUsed;
uint32_t ClrImportant;
} M5_ATTR_PACKED;
};
struct CompleteV1Header {
struct M5_ATTR_PACKED CompleteV1Header {
FileHeader file;
InfoHeaderV1 info;
} M5_ATTR_PACKED;
};
struct BmpPixel32 {
struct M5_ATTR_PACKED BmpPixel32 {
BmpPixel32 &operator=(const Pixel &rhs) {
red = rhs.red;
green = rhs.green;
@@ -115,7 +115,7 @@ class BmpWriter : public ImgWriter
uint8_t green;
uint8_t red;
uint8_t padding;
} M5_ATTR_PACKED;
};
typedef BmpPixel32 PixelType;

View File

@@ -45,43 +45,77 @@
// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
#if defined(__GNUC__) // clang or gcc
# define M5_VAR_USED __attribute__((unused))
# define M5_ATTR_PACKED __attribute__ ((__packed__))
# define M5_NO_INLINE __attribute__ ((__noinline__))
/*
* Attributes that become standard in later versions of c++.
*/
// Use M5_FALLTHROUGH to mark when you're intentionally falling through from
// one case to another in a switch statement.
#if __has_cpp_attribute(fallthrough) // Standard in c++17.
# define M5_FALLTHROUGH [[fallthrough]]
#else
// Not supported, so it's not necessary to avoid warnings.
# define M5_FALLTHROUGH
#endif
// When the return value of a function should not be discarded, mark it with
// M5_NODISCARD.
#if __has_cpp_attribute(nodiscard) // Standard in c++17, with message in c++20.
# define M5_NODISCARD [[nodiscard]]
#else
// Not supported, but it's optional so we can just omit it.
# define M5_NODISCARD
#endif
// When a variable may purposefully not be used, for instance if it's only used
// in debug statements which might be disabled, mark it with M5_VAR_USED.
#if __has_cpp_attribute(maybe_unused) // Standard in c++17.
# define M5_VAR_USED [[maybe_unused]]
#elif defined(__GNUC__)
// gcc and clang support a custom attribute which is essentially the same
// thing.
# define M5_VAR_USED [[gnu::unused]]
#endif
/*
* Compiler specific features.
*/
#if defined(__GNUC__) // clang or gcc.
// Mark a structure as packed, so that no padding is added to its layout. This
// padding might be added to, for instance, ensure certain fields have certain
// alignment.
# define M5_ATTR_PACKED [[gnu::packed]]
// Prevent a function from being inlined.
# define M5_NO_INLINE [[gnu::noinline]]
// Set the visibility of a symbol.
# define M5_PUBLIC [[gnu:visibility("default")]]
# define M5_LOCAL [[gnu::visibility("hidden")]]
// Marker for what should be an unreachable point in the code.
# define M5_UNREACHABLE __builtin_unreachable()
# define M5_PUBLIC __attribute__ ((visibility ("default")))
# define M5_LOCAL __attribute__ ((visibility ("hidden")))
// To mark a branch condition as likely taken, wrap its condition with
// M5_LIKELY. To mark it as likely not taken, wrap its condition with
// M5_UNLIKELY. These can be replaced with the standard attributes [[likely]]
// and [[unlikely]] in c++20, although the syntax is different enough that
// we can't do that with direct substitution.
# define M5_LIKELY(cond) __builtin_expect(!!(cond), 1)
# define M5_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
#endif
#if defined(__clang__)
// When a member variable may be unused, mark it with M5_CLASS_VAR_USED. This
// needs to be limited to clang only since clang warns on these unused
// variables, and g++ will actually warn if you use this attribute since it
// won't do anything there.
#if defined(__clang__) // clang only.
# define M5_CLASS_VAR_USED M5_VAR_USED
#else
# define M5_CLASS_VAR_USED
#endif
// This can be removed once all compilers support C++17
#if defined __has_cpp_attribute
// Note: We must separate this if statement because GCC < 5.0 doesn't
// support the function-like syntax in #if statements.
#if __has_cpp_attribute(fallthrough)
#define M5_FALLTHROUGH [[fallthrough]]
#else
#define M5_FALLTHROUGH
#endif
#if __has_cpp_attribute(nodiscard)
#define M5_NODISCARD [[nodiscard]]
#else
#define M5_NODISCARD
#endif
#else
// Unsupported (and no warning) on GCC < 7.
#define M5_FALLTHROUGH
#define M5_NODISCARD
#endif
#endif // __BASE_COMPILER_HH__

View File

@@ -141,7 +141,7 @@ ElfObject::ElfObject(ImageFileDataPtr ifd) : ObjectFile(ifd)
"No loadable segments in '%s'. ELF file corrupted?\n",
imageData->filename());
for (auto M5_VAR_USED &seg: image.segments())
for (M5_VAR_USED auto &seg: image.segments())
DPRINTFR(Loader, "%s\n", seg);
// We will actually read the sections when we need to load them

View File

@@ -76,7 +76,7 @@ class PngWriter : public ImgWriter
void write(std::ostream &png) const override;
private:
/** Png Pixel type: not containing padding */
struct PngPixel24 {
struct M5_ATTR_PACKED PngPixel24 {
PngPixel24 &operator=(const Pixel &rhs) {
red = rhs.red;
green = rhs.green;
@@ -87,7 +87,7 @@ class PngWriter : public ImgWriter
uint8_t red;
uint8_t green;
uint8_t blue;
} M5_ATTR_PACKED;
};
/**
* Handle to resources used by libpng:

View File

@@ -68,7 +68,7 @@ Group::regStats()
for (auto &g : statGroups) {
if (DTRACE(Stats)) {
const SimObject M5_VAR_USED *so =
M5_VAR_USED const SimObject *so =
dynamic_cast<const SimObject *>(this);
DPRINTF(Stats, "%s: regStats in group %s\n",
so ? so->name() : "?",

View File

@@ -96,7 +96,7 @@ class VncInput : public SimObject
ClientCutText = 6
};
struct PixelFormat {
struct M5_ATTR_PACKED PixelFormat {
uint8_t bpp;
uint8_t depth;
uint8_t bigendian;
@@ -108,48 +108,48 @@ class VncInput : public SimObject
uint8_t greenshift;
uint8_t blueshift;
uint8_t padding[3];
} M5_ATTR_PACKED;
};
struct PixelFormatMessage {
struct M5_ATTR_PACKED PixelFormatMessage {
uint8_t type;
uint8_t padding[3];
PixelFormat px;
} M5_ATTR_PACKED;
};
struct PixelEncodingsMessage {
struct M5_ATTR_PACKED PixelEncodingsMessage {
uint8_t type;
uint8_t padding;
uint16_t num_encodings;
} M5_ATTR_PACKED;
};
struct FrameBufferUpdateReq {
struct M5_ATTR_PACKED FrameBufferUpdateReq {
uint8_t type;
uint8_t incremental;
uint16_t x;
uint16_t y;
uint16_t width;
uint16_t height;
} M5_ATTR_PACKED;
};
struct KeyEventMessage {
struct M5_ATTR_PACKED KeyEventMessage {
uint8_t type;
uint8_t down_flag;
uint8_t padding[2];
uint32_t key;
} M5_ATTR_PACKED;
};
struct PointerEventMessage {
struct M5_ATTR_PACKED PointerEventMessage {
uint8_t type;
uint8_t button_mask;
uint16_t x;
uint16_t y;
} M5_ATTR_PACKED;
};
struct ClientCutTextMessage {
struct M5_ATTR_PACKED ClientCutTextMessage {
uint8_t type;
uint8_t padding[3];
uint32_t length;
} M5_ATTR_PACKED;
};
typedef VncInputParams Params;
VncInput(const Params *p);

View File

@@ -378,7 +378,7 @@ VncServer::checkProtocolVersion()
{
assert(curState == WaitForProtocolVersion);
size_t len M5_VAR_USED;
M5_VAR_USED size_t len;
char version_string[13];
// Null terminate the message so it's easier to work with

View File

@@ -106,33 +106,33 @@ class VncServer : public VncInput
NormalPhase
};
struct ServerInitMsg {
struct M5_ATTR_PACKED ServerInitMsg {
uint16_t fbWidth;
uint16_t fbHeight;
PixelFormat px;
uint32_t namelen;
char name[2]; // just to put M5 in here
} M5_ATTR_PACKED;
};
struct FrameBufferUpdate {
struct M5_ATTR_PACKED FrameBufferUpdate {
uint8_t type;
uint8_t padding;
uint16_t num_rects;
} M5_ATTR_PACKED;
};
struct FrameBufferRect {
struct M5_ATTR_PACKED FrameBufferRect {
uint16_t x;
uint16_t y;
uint16_t width;
uint16_t height;
int32_t encoding;
} M5_ATTR_PACKED;
};
struct ServerCutText {
struct M5_ATTR_PACKED ServerCutText {
uint8_t type;
uint8_t padding[3];
uint32_t length;
} M5_ATTR_PACKED;
};
/** @} */

View File

@@ -68,7 +68,7 @@ using namespace X86ISA;
// data) is used to indicate that a segment has been accessed.
#define SEG_TYPE_BIT_ACCESSED 1
struct FXSave
struct M5_ATTR_PACKED FXSave
{
uint16_t fcw;
uint16_t fsw;
@@ -97,7 +97,7 @@ struct FXSave
uint8_t xmm[16][16];
uint64_t reserved[12];
} M5_ATTR_PACKED;
};
static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");

View File

@@ -388,7 +388,7 @@ void
Fetch1::minorTraceResponseLine(const std::string &name,
Fetch1::FetchRequestPtr response) const
{
const RequestPtr &request M5_VAR_USED = response->request;
M5_VAR_USED const RequestPtr &request = response->request;
if (response->packet && response->packet->isError()) {
MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",

View File

@@ -77,7 +77,7 @@ LSQ::LSQRequest::tryToSuppressFault()
SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
TheISA::PCState old_pc = thread.pcState();
ExecContext context(port.cpu, thread, port.execute, inst);
Fault M5_VAR_USED fault = inst->translationFault;
M5_VAR_USED Fault fault = inst->translationFault;
// Give the instruction a chance to suppress a translation fault
inst->translationFault = inst->staticInst->initiateAcc(&context, nullptr);
@@ -334,7 +334,7 @@ LSQ::SplitDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
{
port.numAccessesInDTLB--;
unsigned int M5_VAR_USED expected_fragment_index =
M5_VAR_USED unsigned int expected_fragment_index =
numTranslatedFragments;
numInTranslationFragments--;
@@ -475,7 +475,7 @@ LSQ::SplitDataRequest::makeFragmentRequests()
for (unsigned int fragment_index = 0; fragment_index < numFragments;
fragment_index++)
{
bool M5_VAR_USED is_last_fragment = false;
M5_VAR_USED bool is_last_fragment = false;
if (fragment_addr == base_addr) {
/* First fragment */

View File

@@ -701,7 +701,7 @@ DefaultCommit<Impl>::tick()
// will be active.
_nextStatus = Active;
const DynInstPtr &inst M5_VAR_USED = rob->readHeadInst(tid);
M5_VAR_USED const DynInstPtr &inst = rob->readHeadInst(tid);
DPRINTF(Commit,"[tid:%i] Instruction [sn:%llu] PC %s is head of"
" ROB and ready to commit\n",

View File

@@ -682,7 +682,7 @@ LSQ<Impl>::pushRequest(const DynInstPtr& inst, bool isLoad, uint8_t *data,
// This comming request can be either load, store or atomic.
// Atomic request has a corresponding pointer to its atomic memory
// operation
bool isAtomic M5_VAR_USED = !isLoad && amo_op;
M5_VAR_USED bool isAtomic = !isLoad && amo_op;
ThreadID tid = cpu->contextToThread(inst->contextId());
auto cacheLineSize = cpu->cacheLineSize();

View File

@@ -270,7 +270,7 @@ MemDepUnit<MemDepPred, Impl>::insert(const DynInstPtr &inst)
} else {
// Otherwise make the instruction dependent on the store/barrier.
DPRINTF(MemDepUnit, "Adding to dependency list\n");
for (auto M5_VAR_USED producing_store : producing_stores)
for (M5_VAR_USED auto producing_store : producing_stores)
DPRINTF(MemDepUnit, "\tinst PC %s is dependent on [sn:%lli].\n",
inst->pcState(), producing_store);

View File

@@ -57,7 +57,7 @@ class Scoreboard
std::vector<bool> regScoreBoard;
/** The number of actual physical registers */
unsigned M5_CLASS_VAR_USED numPhysRegs;
M5_CLASS_VAR_USED unsigned numPhysRegs;
public:
/** Constructs a scoreboard.

View File

@@ -108,7 +108,7 @@ BPredUnit::drainSanityCheck() const
{
// We shouldn't have any outstanding requests when we resume from
// a drained system.
for (const auto& ph M5_VAR_USED : predHist)
for (M5_VAR_USED const auto& ph : predHist)
assert(ph.empty());
}

View File

@@ -947,7 +947,7 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
// hardware transactional memory
SimpleExecContext *t_info = threadInfo[curThread];
const bool is_htm_speculative M5_VAR_USED =
M5_VAR_USED const bool is_htm_speculative =
t_info->inHtmTransactionalState();
// received a response from the dcache: complete the load or store

View File

@@ -245,7 +245,7 @@ MemTest::tick()
if (cmd < percentReads) {
// start by ensuring there is a reference value if we have not
// seen this address before
uint8_t M5_VAR_USED ref_data = 0;
M5_VAR_USED uint8_t ref_data = 0;
auto ref = referenceData.find(req->getPaddr());
if (ref == referenceData.end()) {
referenceData[req->getPaddr()] = 0;

View File

@@ -811,7 +811,7 @@ TraceCPU::ElasticDataGen::printReadyList() {
DPRINTF(TraceCPUData, "Printing readyList:\n");
while (itr != readyList.end()) {
auto graph_itr = depGraph.find(itr->seqNum);
GraphNode* node_ptr M5_VAR_USED = graph_itr->second;
M5_VAR_USED GraphNode* node_ptr = graph_itr->second;
DPRINTFR(TraceCPUData, "\t%lld(%s), %lld\n", itr->seqNum,
node_ptr->typeToStr(), itr->execTick);
itr++;
@@ -1341,7 +1341,7 @@ TraceCPU::ElasticDataGen::GraphNode::removeDepOnInst(NodeSeqNum done_seq_num)
// If it is not an rob dependency then it must be a register dependency
// If the register dependency is not found, it violates an assumption
// and must be caught by assert.
bool regdep_found M5_VAR_USED = removeRegDep(done_seq_num);
M5_VAR_USED bool regdep_found = removeRegDep(done_seq_num);
assert(regdep_found);
}
// Return true if the node is dependency free

View File

@@ -389,7 +389,7 @@ GicV2::writeDistributor(PacketPtr pkt)
const ContextID ctx = pkt->req->contextId();
const size_t data_sz = pkt->getSize();
uint32_t pkt_data M5_VAR_USED;
M5_VAR_USED uint32_t pkt_data;
switch (data_sz)
{
case 1:

View File

@@ -97,7 +97,7 @@ class SMMUTranslationProcess : public SMMUProcess
TranslContext context;
Tick recvTick;
Tick M5_CLASS_VAR_USED faultTick;
M5_CLASS_VAR_USED Tick faultTick;
virtual void main(Yield &yield);

View File

@@ -126,7 +126,7 @@ HSAPacketProcessor::write(Packet *pkt)
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
// TODO: How to get pid??
Addr M5_VAR_USED daddr = pkt->getAddr() - pioAddr;
M5_VAR_USED Addr daddr = pkt->getAddr() - pioAddr;
DPRINTF(HSAPacketProcessor,
"%s: write of size %d to reg-offset %d (0x%x)\n",
@@ -256,7 +256,7 @@ void
HSAPacketProcessor::CmdQueueCmdDmaEvent::process()
{
uint32_t rl_idx = series_ctx->rl_idx;
AQLRingBuffer *aqlRingBuffer M5_VAR_USED =
M5_VAR_USED AQLRingBuffer *aqlRingBuffer =
hsaPP->regdQList[rl_idx]->qCntxt.aqlBuf;
HSAQueueDescriptor* qDesc =
hsaPP->regdQList[rl_idx]->qCntxt.qDesc;
@@ -590,7 +590,7 @@ HSAPacketProcessor::getCommandsFromHost(int pid, uint32_t rl_idx)
void
HSAPacketProcessor::displayQueueDescriptor(int pid, uint32_t rl_idx)
{
HSAQueueDescriptor* M5_VAR_USED qDesc = regdQList[rl_idx]->qCntxt.qDesc;
M5_VAR_USED HSAQueueDescriptor* qDesc = regdQList[rl_idx]->qCntxt.qDesc;
DPRINTF(HSAPacketProcessor,
"%s: pid[%d], basePointer[0x%lx], dBPointer[0x%lx], "
"writeIndex[0x%x], readIndex[0x%x], size(bytes)[0x%x]\n",

View File

@@ -118,7 +118,7 @@ HWScheduler::registerNewQueue(uint64_t hostReadIndexPointer,
// Check if this newly created queue can be directly mapped
// to registered queue list
bool M5_VAR_USED register_q = mapQIfSlotAvlbl(queue_id, aql_buf, q_desc);
M5_VAR_USED bool register_q = mapQIfSlotAvlbl(queue_id, aql_buf, q_desc);
schedWakeup();
DPRINTF(HSAPacketProcessor,
"%s: offset = %p, qID = %d, is_regd = %s, AL size %d\n",

View File

@@ -233,7 +233,7 @@ Device::read(PacketPtr pkt)
prepareRead(cpu, index);
uint64_t value M5_VAR_USED = 0;
M5_VAR_USED uint64_t value = 0;
if (pkt->getSize() == 4) {
uint32_t reg = regData32(raddr);
pkt->setLE(reg);

View File

@@ -253,7 +253,7 @@ TCPIface::connect()
TCPIface::~TCPIface()
{
int M5_VAR_USED ret;
M5_VAR_USED int ret;
ret = close(sock);
assert(ret == 0);

View File

@@ -305,19 +305,19 @@ CopyEngine::write(PacketPtr pkt)
///
if (size == sizeof(uint64_t)) {
uint64_t val M5_VAR_USED = pkt->getLE<uint64_t>();
M5_VAR_USED uint64_t val = pkt->getLE<uint64_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint32_t)) {
uint32_t val M5_VAR_USED = pkt->getLE<uint32_t>();
M5_VAR_USED uint32_t val = pkt->getLE<uint32_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint16_t)) {
uint16_t val M5_VAR_USED = pkt->getLE<uint16_t>();
M5_VAR_USED uint16_t val = pkt->getLE<uint16_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint8_t)) {
uint8_t val M5_VAR_USED = pkt->getLE<uint8_t>();
M5_VAR_USED uint8_t val = pkt->getLE<uint8_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else {

View File

@@ -451,10 +451,10 @@ public:
typedef uint16_t Flags;
typedef uint16_t Index;
struct Header {
struct M5_ATTR_PACKED Header {
Flags flags;
Index index;
} M5_ATTR_PACKED;
};
VirtRing<T>(PortProxy &proxy, ByteOrder bo, uint16_t size) :
header{0, 0}, ring(size), _proxy(proxy), _base(0), byteOrder(bo)

View File

@@ -81,9 +81,9 @@ class VirtIOBlock : public VirtIODeviceBase
* @note This needs to be changed if the supported feature set
* changes!
*/
struct Config {
struct M5_ATTR_PACKED Config {
uint64_t capacity;
} M5_ATTR_PACKED;
};
Config config;
/** @{
@@ -122,11 +122,11 @@ class VirtIOBlock : public VirtIODeviceBase
/** @} */
/** VirtIO block device request as sent by guest */
struct BlkRequest {
struct M5_ATTR_PACKED BlkRequest {
RequestType type;
uint32_t reserved;
uint64_t sector;
} M5_ATTR_PACKED;
};
/**
* Device read request.

View File

@@ -77,10 +77,10 @@ class VirtIOConsole : public VirtIODeviceBase
* @note This needs to be changed if the multiport feature is
* announced!
*/
struct Config {
struct M5_ATTR_PACKED Config {
uint16_t cols;
uint16_t rows;
} M5_ATTR_PACKED;
};
/** Currently active configuration (host byte order) */
Config config;

View File

@@ -50,14 +50,14 @@ struct VirtIO9PBaseParams;
typedef uint8_t P9MsgType;
typedef uint16_t P9Tag;
struct P9MsgHeader {
struct M5_ATTR_PACKED P9MsgHeader {
/** Length including header */
uint32_t len;
/** Message type */
P9MsgType type;
/** Message tag */
P9Tag tag;
} M5_ATTR_PACKED;
};
/** Convert p9 byte order (LE) to host byte order */
template <typename T> inline T
@@ -120,10 +120,10 @@ class VirtIO9PBase : public VirtIODeviceBase
* @note The fields in this structure depend on the features
* exposed to the guest.
*/
struct Config {
struct M5_ATTR_PACKED Config {
uint16_t len;
char tag[];
} M5_ATTR_PACKED;
};
/** Currently active configuration (host byte order) */
std::unique_ptr<Config> config;

View File

@@ -65,7 +65,7 @@ PciVirtIO::~PciVirtIO()
Tick
PciVirtIO::read(PacketPtr pkt)
{
const unsigned M5_VAR_USED size(pkt->getSize());
M5_VAR_USED const unsigned size(pkt->getSize());
int bar;
Addr offset;
if (!getBAR(pkt->getAddr(), bar, offset))
@@ -146,7 +146,7 @@ PciVirtIO::read(PacketPtr pkt)
Tick
PciVirtIO::write(PacketPtr pkt)
{
const unsigned M5_VAR_USED size(pkt->getSize());
M5_VAR_USED const unsigned size(pkt->getSize());
int bar;
Addr offset;
if (!getBAR(pkt->getAddr(), bar, offset))

View File

@@ -350,7 +350,7 @@ ComputeUnit::startWavefront(Wavefront *w, int waveId, LdsChunk *ldsChunk,
// set the wavefront context to have a pointer to this section of the LDS
w->ldsChunk = ldsChunk;
int32_t refCount M5_VAR_USED =
M5_VAR_USED int32_t refCount =
lds.increaseRefCounter(w->dispatchId, w->wgId);
DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n",
cu_id, w->wgId, refCount);
@@ -867,7 +867,7 @@ ComputeUnit::DataPort::recvTimingResp(PacketPtr pkt)
// this is for writeComplete callback
// we simply get decrement write-related wait counters
assert(gpuDynInst);
Wavefront *w M5_VAR_USED =
M5_VAR_USED Wavefront *w =
computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId];
assert(w);
DPRINTF(GPUExec, "WriteCompleteResp: WF[%d][%d] WV%d %s decrementing "
@@ -965,7 +965,7 @@ ComputeUnit::DataPort::recvReqRetry()
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front().first;
GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second;
M5_VAR_USED GPUDynInstPtr gpuDynInst = retries.front().second;
DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n",
computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
pkt->req->getPaddr());
@@ -999,7 +999,7 @@ ComputeUnit::SQCPort::recvReqRetry()
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front().first;
Wavefront *wavefront M5_VAR_USED = retries.front().second;
M5_VAR_USED Wavefront *wavefront = retries.front().second;
DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n",
computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
pkt->req->getPaddr());
@@ -1406,7 +1406,7 @@ ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt)
DTLBPort::SenderState *sender_state =
safe_cast<DTLBPort::SenderState*>(translation_state->saved);
Wavefront *w M5_VAR_USED =
M5_VAR_USED Wavefront *w =
computeUnit->wfList[sender_state->_gpuDynInst->simdId]
[sender_state->_gpuDynInst->wfSlotId];
@@ -1575,7 +1575,7 @@ ComputeUnit::DataPort::processMemReqEvent(PacketPtr pkt)
{
SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
ComputeUnit *compute_unit M5_VAR_USED = computeUnit;
M5_VAR_USED ComputeUnit *compute_unit = computeUnit;
if (!(sendTimingReq(pkt))) {
retries.push_back(std::make_pair(pkt, gpuDynInst));
@@ -1604,7 +1604,7 @@ ComputeUnit::ScalarDataPort::MemReqEvent::process()
{
SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
ComputeUnit *compute_unit M5_VAR_USED = scalarDataPort.computeUnit;
M5_VAR_USED ComputeUnit *compute_unit = scalarDataPort.computeUnit;
if (!(scalarDataPort.sendTimingReq(pkt))) {
scalarDataPort.retries.push_back(pkt);
@@ -1644,7 +1644,7 @@ ComputeUnit::DTLBPort::recvReqRetry()
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front();
Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
M5_VAR_USED Addr vaddr = pkt->req->getVaddr();
DPRINTF(GPUTLB, "CU%d: retrying D-translaton for address%#x", vaddr);
if (!sendTimingReq(pkt)) {
@@ -1683,7 +1683,7 @@ ComputeUnit::ScalarDTLBPort::recvTimingResp(PacketPtr pkt)
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
delete pkt->senderState;
Wavefront *w M5_VAR_USED = gpuDynInst->wavefront();
M5_VAR_USED Wavefront *w = gpuDynInst->wavefront();
DPRINTF(GPUTLB, "CU%d: WF[%d][%d][wv=%d]: scalar DTLB port received "
"translation: PA %#x -> %#x\n", computeUnit->cu_id, w->simdId,
@@ -1722,7 +1722,7 @@ ComputeUnit::ScalarDTLBPort::recvTimingResp(PacketPtr pkt)
bool
ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt)
{
Addr line M5_VAR_USED = pkt->req->getPaddr();
M5_VAR_USED Addr line = pkt->req->getPaddr();
DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
computeUnit->cu_id, pkt->req->getVaddr(), line);
@@ -1788,7 +1788,7 @@ ComputeUnit::ITLBPort::recvReqRetry()
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front();
Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
M5_VAR_USED Addr vaddr = pkt->req->getVaddr();
DPRINTF(GPUTLB, "CU%d: retrying I-translaton for address%#x", vaddr);
if (!sendTimingReq(pkt)) {
@@ -2584,7 +2584,7 @@ ComputeUnit::LDSPort::sendTimingReq(PacketPtr pkt)
dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState);
fatal_if(!sender_state, "packet without a valid sender state");
GPUDynInstPtr gpuDynInst M5_VAR_USED = sender_state->getMemInst();
M5_VAR_USED GPUDynInstPtr gpuDynInst = sender_state->getMemInst();
if (isStalled()) {
fatal_if(retries.empty(), "must have retries waiting to be stalled");

View File

@@ -749,7 +749,7 @@ ScheduleStage::reserveResources()
// that we've reserved a global and local memory unit. Thus,
// we need to mark the latter execution unit as not available.
if (execUnitIds.size() > 1) {
int lm_exec_unit M5_VAR_USED = wf->localMem;
M5_VAR_USED int lm_exec_unit = wf->localMem;
assert(toExecute.dispatchStatus(lm_exec_unit)
== SKIP);
}
@@ -758,7 +758,7 @@ ScheduleStage::reserveResources()
// Verify the GM pipe for this wave is ready to execute
// and the wave in the GM pipe is the same as the wave
// in the LM pipe
int gm_exec_unit M5_VAR_USED = wf->globalMem;
M5_VAR_USED int gm_exec_unit = wf->globalMem;
assert(wf->wfDynId == toExecute
.readyInst(gm_exec_unit)->wfDynId);
assert(toExecute.dispatchStatus(gm_exec_unit)

View File

@@ -43,14 +43,14 @@
#include "sim/byteswap.hh"
#include "sim/system.hh"
struct DmesgEntry {
struct M5_ATTR_PACKED DmesgEntry {
uint64_t ts_nsec;
uint16_t len;
uint16_t text_len;
uint16_t dict_len;
uint8_t facility;
uint8_t flags;
} M5_ATTR_PACKED;
};
static int
dumpDmesgEntry(const uint8_t *base, const uint8_t *end, const ByteOrder bo,

View File

@@ -73,7 +73,7 @@ Linux::openSpecialFile(std::string path, Process *process,
if (matched) {
FILE *f = tmpfile();
int fd = fileno(f);
size_t ret M5_VAR_USED = fwrite(data.c_str(), 1, data.size(), f);
M5_VAR_USED size_t ret = fwrite(data.c_str(), 1, data.size(), f);
assert(ret == data.size());
rewind(f);
return fd;

View File

@@ -35,7 +35,7 @@
void
SkipFuncBase::process(ThreadContext *tc)
{
TheISA::PCState oldPC M5_VAR_USED = tc->pcState();
M5_VAR_USED TheISA::PCState oldPC = tc->pcState();
returnFromFuncIn(tc);

View File

@@ -230,7 +230,7 @@ SimpleCache::handleResponse(PacketPtr pkt)
DPRINTF(SimpleCache, "Copying data from new packet to old\n");
// We had to upgrade a previous packet. We can functionally deal with
// the cache access now. It better be a hit.
bool hit M5_VAR_USED = accessFunctional(originalPacket);
M5_VAR_USED bool hit = accessFunctional(originalPacket);
panic_if(!hit, "Should always hit after inserting");
originalPacket->makeResponse();
delete pkt; // We may need to delay this, I'm not sure.

View File

@@ -855,7 +855,7 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
// the bigger block
// Get previous compressed size
const std::size_t M5_VAR_USED prev_size = compression_blk->getSizeBits();
M5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
// Check if new data is co-allocatable
const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
@@ -2320,7 +2320,7 @@ BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
if (cache->system->bypassCaches()) {
// Just forward the packet if caches are disabled.
// @todo This should really enqueue the packet rather
bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
M5_VAR_USED bool success = cache->memSidePort.sendTimingReq(pkt);
assert(success);
return true;
} else if (tryTiming(pkt)) {

View File

@@ -447,7 +447,7 @@ Cache::recvTimingReq(PacketPtr pkt)
// this express snoop travels towards the memory, and at
// every crossbar it is snooped upwards thus reaching
// every cache in the system
bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
M5_VAR_USED bool success = memSidePort.sendTimingReq(snoop_pkt);
// express snoops always succeed
assert(success);
@@ -992,7 +992,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
// responds in atomic mode, so remember a few things about the
// original packet up front
bool invalidate = pkt->isInvalidate();
bool M5_VAR_USED needs_writable = pkt->needsWritable();
M5_VAR_USED bool needs_writable = pkt->needsWritable();
// at the moment we could get an uncacheable write which does not
// have the invalidate flag, and we need a suitable way of dealing
@@ -1391,7 +1391,7 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
// prefetchSquash first may result in the MSHR being
// prematurely deallocated.
if (snoop_pkt.cacheResponding()) {
auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
M5_VAR_USED auto r = outstandingSnoop.insert(snoop_pkt.req);
assert(r.second);
// if we are getting a snoop response with no sharers it

View File

@@ -117,7 +117,7 @@ void
FALRU::invalidate(CacheBlk *blk)
{
// Erase block entry reference in the hash table
auto num_erased M5_VAR_USED =
M5_VAR_USED auto num_erased =
tagHash.erase(std::make_pair(blk->tag, blk->isSecure()));
// Sanity check; only one block reference should be erased

View File

@@ -638,7 +638,7 @@ CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID cpu_side_port_id)
*memSidePorts[dest_port_id]);
}
bool success M5_VAR_USED =
M5_VAR_USED bool success =
memSidePorts[dest_port_id]->sendTimingSnoopResp(pkt);
pktCount[cpu_side_port_id][dest_port_id]++;
pktSize[cpu_side_port_id][dest_port_id] += pkt_size;
@@ -858,7 +858,7 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
// if this is the destination of the operation, the xbar
// sends the responce to the cache clean operation only
// after having encountered the cache clean request
auto M5_VAR_USED ret = outstandingCMO.emplace(pkt->id, nullptr);
M5_VAR_USED auto ret = outstandingCMO.emplace(pkt->id, nullptr);
// in atomic mode we know that the WriteClean packet should
// precede the clean request
assert(ret.second);

View File

@@ -169,7 +169,7 @@ DRAMSim2Wrapper::canAccept() const
void
DRAMSim2Wrapper::enqueue(bool is_write, uint64_t addr)
{
bool success M5_VAR_USED = dramsim->addTransaction(is_write, addr);
M5_VAR_USED bool success = dramsim->addTransaction(is_write, addr);
assert(success);
}

View File

@@ -123,7 +123,7 @@ DRAMsim3Wrapper::canAccept(uint64_t addr, bool is_write) const
void
DRAMsim3Wrapper::enqueue(uint64_t addr, bool is_write)
{
bool success M5_VAR_USED = dramsim->AddTransaction(addr, is_write);
M5_VAR_USED bool success = dramsim->AddTransaction(addr, is_write);
assert(success);
}

View File

@@ -97,7 +97,7 @@ Tick
StubSlavePort::recvAtomic(PacketPtr packet)
{
if (DTRACE(ExternalPort)) {
unsigned int M5_VAR_USED size = packet->getSize();
M5_VAR_USED unsigned int size = packet->getSize();
DPRINTF(ExternalPort, "StubSlavePort: recvAtomic a: 0x%x size: %d"
" data: ...\n", packet->getAddr(), size);

View File

@@ -140,7 +140,7 @@ class MemInterface : public AbstractMemory
/**
* General timing requirements
*/
const Tick M5_CLASS_VAR_USED tCK;
M5_CLASS_VAR_USED const Tick tCK;
const Tick tCS;
const Tick tBURST;
const Tick tRTW;

View File

@@ -78,7 +78,7 @@ EmulationPageTable::remap(Addr vaddr, int64_t size, Addr new_vaddr)
new_vaddr, size);
while (size > 0) {
auto new_it M5_VAR_USED = pTable.find(new_vaddr);
M5_VAR_USED auto new_it = pTable.find(new_vaddr);
auto old_it = pTable.find(vaddr);
assert(old_it != pTable.end() && new_it == pTable.end());

View File

@@ -130,7 +130,7 @@ GarnetNetwork::init()
for (vector<Router*>::const_iterator i= m_routers.begin();
i != m_routers.end(); ++i) {
Router* router = safe_cast<Router*>(*i);
int router_id M5_VAR_USED =
M5_VAR_USED int router_id =
fault_model->declare_router(router->get_num_inports(),
router->get_num_outports(),
router->get_vc_per_vnet(),

View File

@@ -99,7 +99,7 @@ class OutputUnit : public Consumer
private:
Router *m_router;
int M5_CLASS_VAR_USED m_id;
M5_CLASS_VAR_USED int m_id;
PortDirection m_direction;
int m_vc_per_vnet;
NetworkLink *m_out_link;

View File

@@ -201,7 +201,7 @@ RoutingUnit::outportComputeXY(RouteInfo route,
{
PortDirection outport_dirn = "Unknown";
int M5_VAR_USED num_rows = m_router->get_net_ptr()->getNumRows();
M5_VAR_USED int num_rows = m_router->get_net_ptr()->getNumRows();
int num_cols = m_router->get_net_ptr()->getNumCols();
assert(num_rows > 0 && num_cols > 0);

View File

@@ -414,7 +414,7 @@ void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
uint64_t warmedUpBlocks = 0;
uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
M5_VAR_USED uint64_t totalBlocks = (uint64_t)m_cache_num_sets *
(uint64_t)m_cache_assoc;
for (int i = 0; i < m_cache_num_sets; i++) {

View File

@@ -150,7 +150,7 @@ template<class ENTRY>
inline void
PerfectCacheMemory<ENTRY>::deallocate(Addr address)
{
auto num_erased M5_VAR_USED = m_map.erase(makeLineAddress(address));
M5_VAR_USED auto num_erased = m_map.erase(makeLineAddress(address));
assert(num_erased == 1);
}

View File

@@ -460,7 +460,7 @@ GPUCoalescer::hitCallback(CoalescedRequest* crequest,
{
PacketPtr pkt = crequest->getFirstPkt();
Addr request_address = pkt->getAddr();
Addr request_line_address M5_VAR_USED = makeLineAddress(request_address);
M5_VAR_USED Addr request_line_address = makeLineAddress(request_address);
RubyRequestType type = crequest->getRubyType();

View File

@@ -204,7 +204,7 @@ RubyPort::PioResponsePort::recvTimingReq(PacketPtr pkt)
if (it->contains(pkt->getAddr())) {
// generally it is not safe to assume success here as
// the port could be blocked
bool M5_VAR_USED success =
M5_VAR_USED bool success =
ruby_port->request_ports[i]->sendTimingReq(pkt);
assert(success);
return true;
@@ -371,7 +371,7 @@ RubyPort::MemResponsePort::recvFunctional(PacketPtr pkt)
{
DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());
RubyPort *rp M5_VAR_USED = static_cast<RubyPort *>(&owner);
M5_VAR_USED RubyPort *rp = static_cast<RubyPort *>(&owner);
RubySystem *rs = rp->m_ruby_system;
// Check for pio requests and directly send them to the dedicated
@@ -597,7 +597,7 @@ RubyPort::PioResponsePort::getAddrRanges() const
ranges.splice(ranges.begin(),
ruby_port->request_ports[i]->getAddrRanges());
}
for (const auto M5_VAR_USED &r : ranges)
for (M5_VAR_USED const auto &r : ranges)
DPRINTF(RubyPort, "%s\n", r.to_string());
return ranges;
}

View File

@@ -602,7 +602,7 @@ RubySystem::functionalWrite(PacketPtr pkt)
DPRINTF(RubySystem, "Functional Write request for %#x\n", addr);
uint32_t M5_VAR_USED num_functional_writes = 0;
M5_VAR_USED uint32_t num_functional_writes = 0;
// Only send functional requests within the same network.
assert(requestorToNetwork.count(pkt->requestorId()));

View File

@@ -61,7 +61,7 @@ class PeekStatementAST(StatementAST):
code('''
{
// Declare message
const $mtid* in_msg_ptr M5_VAR_USED;
M5_VAR_USED const $mtid* in_msg_ptr;
in_msg_ptr = dynamic_cast<const $mtid *>(($qcode).${{self.method}}());
if (in_msg_ptr == NULL) {
// If the cast fails, this is the wrong inport (wrong message type).

View File

@@ -605,7 +605,7 @@ void
$c_ident::initNetQueues()
{
MachineType machine_type = string_to_MachineType("${{self.ident}}");
int base M5_VAR_USED = MachineType_base_number(machine_type);
M5_VAR_USED int base = MachineType_base_number(machine_type);
''')
code.indent()

View File

@@ -151,7 +151,7 @@ class ProbeManager
{
private:
/** Required for sensible debug messages.*/
const M5_CLASS_VAR_USED SimObject *object;
M5_CLASS_VAR_USED const SimObject *object;
/** Vector for name look-up. */
std::vector<ProbePoint *> points;

View File

@@ -183,7 +183,7 @@ System::Threads::quiesce(ContextID id)
{
auto &t = thread(id);
# if THE_ISA != NULL_ISA
BaseCPU M5_VAR_USED *cpu = t.context->getCpuPtr();
M5_VAR_USED BaseCPU *cpu = t.context->getCpuPtr();
DPRINTFS(Quiesce, cpu, "quiesce()\n");
# endif
t.quiesce();
@@ -250,7 +250,7 @@ System::System(Params *p)
warn_once("Cache line size is neither 16, 32, 64 nor 128 bytes.\n");
// Get the generic system requestor IDs
RequestorID tmp_id M5_VAR_USED;
M5_VAR_USED RequestorID tmp_id;
tmp_id = getRequestorId(this, "writebacks");
assert(tmp_id == Request::wbRequestorId);
tmp_id = getRequestorId(this, "functional");