saved_cpsr.v = tc->readCCReg(CCREG_V);
saved_cpsr.ge = tc->readCCReg(CCREG_GE);
- Addr curPc M5_VAR_USED = tc->pcState().pc();
+ M5_VAR_USED Addr curPc = tc->pcState().pc();
ITSTATE it = tc->pcState().itstate();
saved_cpsr.it2 = it.top6;
saved_cpsr.it1 = it.bottom2;
// if we have a valid instruction then use it to annotate this fault with
// extra information. This is used to generate the correct fault syndrome
// information
- ArmStaticInst *arm_inst M5_VAR_USED = instrAnnotate(inst);
+ M5_VAR_USED ArmStaticInst *arm_inst = instrAnnotate(inst);
// Ensure Secure state if initially in Monitor mode
if (have_security && saved_cpsr.mode == MODE_MON) {
// If we have a valid instruction then use it to annotate this fault with
// extra information. This is used to generate the correct fault syndrome
// information
- ArmStaticInst *arm_inst M5_VAR_USED = instrAnnotate(inst);
+ M5_VAR_USED ArmStaticInst *arm_inst = instrAnnotate(inst);
// Set PC to start of exception handler
Addr new_pc = purifyTaggedAddr(vec_address, tc, toEL, true);
Addr base;
// Check for invalid modes
- CPSR M5_VAR_USED cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
+ M5_VAR_USED CPSR cpsr = tc->readMiscRegNoEffect(MISCREG_CPSR);
assert(ArmSystem::haveSecurity(tc) || cpsr.mode != MODE_MON);
assert(ArmSystem::haveVirtualization(tc) || cpsr.mode != MODE_HYP);
// See ARM ARM B3-1416
bool override_LPAE = false;
TTBCR ttbcr_s = tc->readMiscReg(MISCREG_TTBCR_S);
- TTBCR M5_VAR_USED ttbcr_ns = tc->readMiscReg(MISCREG_TTBCR_NS);
+ M5_VAR_USED TTBCR ttbcr_ns = tc->readMiscReg(MISCREG_TTBCR_NS);
if (ttbcr_s.eae) {
override_LPAE = true;
} else {
unsigned eBytes = (1 << size);
unsigned loadSize = eBytes * elems;
- unsigned loadRegs M5_VAR_USED =
+ M5_VAR_USED unsigned loadRegs =
(loadSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);
assert(loadRegs > 0 && loadRegs <= 4);
unsigned eBytes = (1 << size);
unsigned storeSize = eBytes * elems;
- unsigned storeRegs M5_VAR_USED =
+ M5_VAR_USED unsigned storeRegs =
(storeSize + sizeof(uint32_t) - 1) / sizeof(uint32_t);
assert(storeRegs > 0 && storeRegs <= 4);
"logic": '0'
}
- immOp2 = "uint64_t secOp M5_VAR_USED = imm;"
- sRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
+ immOp2 = "M5_VAR_USED uint64_t secOp = imm;"
+ sRegOp2 = "M5_VAR_USED uint64_t secOp = " + \
"shiftReg64(Op264, shiftAmt, shiftType, intWidth);"
- eRegOp2 = "uint64_t secOp M5_VAR_USED = " + \
+ eRegOp2 = "M5_VAR_USED uint64_t secOp = " + \
"extendReg64(Op264, extendType, shiftAmt, intWidth);"
def buildDataWork(mnem, code, flagType, suffix, buildCc, buildNonCc,
base, templateBase):
code = '''
- uint64_t resTemp M5_VAR_USED = 0;
+ M5_VAR_USED uint64_t resTemp = 0;
''' + code
ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
Name = mnem.capitalize() + suffix
def condCompCode(flagType, op, imm):
ccCode = createCcCode64(carryCode64[flagType], overflowCode64[flagType])
- opDecl = "uint64_t secOp M5_VAR_USED = imm;"
+ opDecl = "M5_VAR_USED uint64_t secOp = imm;"
if not imm:
- opDecl = "uint64_t secOp M5_VAR_USED = Op264;"
+ opDecl = "M5_VAR_USED uint64_t secOp = Op264;"
return opDecl + '''
if (testPredicate(CondCodesNZ, CondCodesC, CondCodesV, condCode)) {
uint64_t resTemp = Op164 ''' + op + ''' secOp;
exec_output = ""
singleSimpleCode = vfpEnabledCheckCode + '''
- FPSCR fpscr M5_VAR_USED = (FPSCR) FpscrExc;
+ M5_VAR_USED FPSCR fpscr = (FPSCR) FpscrExc;
FpDest = %(op)s;
'''
singleCode = singleSimpleCode + '''
"%(func)s, fpscr.fz, fpscr.dn, fpscr.rMode)"
singleUnaryOp = "unaryOp(fpscr, FpOp1, %(func)s, fpscr.fz, fpscr.rMode)"
doubleCode = vfpEnabledCheckCode + '''
- FPSCR fpscr M5_VAR_USED = (FPSCR) FpscrExc;
+ M5_VAR_USED FPSCR fpscr = (FPSCR) FpscrExc;
double dest = %(op)s;
FpDestP0_uw = dblLow(dest);
FpDestP1_uw = dblHi(dest);
accEpilogCode = None
# Code that actually handles the access
if self.flavor in ("dprefetch", "iprefetch", "mprefetch"):
- accCode = 'uint64_t temp M5_VAR_USED = Mem%s;'
+ accCode = 'M5_VAR_USED uint64_t temp = Mem%s;'
elif self.flavor == "fp":
accEpilogCode = '''
ArmISA::ISA::zeroSveVecRegUpperPart(AA64FpDest,
bitMask = (bitMask >> imm1) | (bitMask << (intWidth - imm1));
diff += intWidth;
}
- uint64_t topBits M5_VAR_USED = ~mask(diff+1);
+ M5_VAR_USED uint64_t topBits = ~mask(diff+1);
uint64_t result = imm1 == 0 ? Op164 :
(Op164 >> imm1) | (Op164 << (intWidth - imm1));
result &= bitMask;
CondCodesC = !destPred.lastActive(GpOp, eCount);
CondCodesV = 0;'''
extraPrologCode = '''
- auto& destPred M5_VAR_USED = PDest;'''
+ M5_VAR_USED auto& destPred = PDest;'''
baseClass = ('SvePredUnaryWImplicitSrcOp' if predType == PredType.NONE
else 'SvePredUnaryWImplicitSrcPredOp')
iop = InstObjParams(name, 'Sve' + Name, baseClass,
global header_output, exec_output, decoders
code = sveEnabledCheckCode + op
extraPrologCode = '''
- auto& destPred M5_VAR_USED = Ffr;'''
+ M5_VAR_USED auto& destPred = Ffr;'''
baseClass = ('SveWImplicitSrcDstOp' if isSetFfr
else 'SvePredUnaryWImplicitDstOp')
iop = InstObjParams(name, 'Sve' + Name, baseClass,
(IntRegIndex)_index)
{
%(constructor)s;
- bool conditional M5_VAR_USED = false;
+ M5_VAR_USED bool conditional = false;
if (!(condCode == COND_AL || condCode == COND_UC)) {
conditional = true;
for (int x = 0; x < _numDestRegs; x++) {
(IntRegIndex)_dest, (IntRegIndex)_base, _add, _imm)
{
%(constructor)s;
- bool conditional M5_VAR_USED = false;
+ M5_VAR_USED bool conditional = false;
if (!(condCode == COND_AL || condCode == COND_UC)) {
conditional = true;
for (int x = 0; x < _numDestRegs; x++) {
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt,
ExecContext *xc, Trace::InstRecord *traceData) const
{
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
%(op_src_decl)s;
%(op_rd)s;
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<RegElemType>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
%(op_src_decl)s;
%(op_rd)s;
Fault %(class_name)s%(tpl_args)s::completeAcc(PacketPtr pkt,
ExecContext *xc, Trace::InstRecord *traceData) const
{
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
Fault %(class_name)s%(tpl_args)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
%(op_decl)s;
%(op_rd)s;
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
{
Addr EA;
Fault fault = NoFault;
- bool aarch64 M5_VAR_USED = true;
+ M5_VAR_USED bool aarch64 = true;
unsigned eCount = ArmStaticInst::getCurSveVecLen<Element>(
xc->tcBase());
{
using BaseGdbRegCache::BaseGdbRegCache;
private:
- struct {
+ struct M5_ATTR_PACKED {
uint32_t gpr[16];
uint32_t cpsr;
uint64_t fpr[32];
uint32_t fpscr;
- } M5_ATTR_PACKED r;
+ } r;
public:
char *data() const { return (char *)&r; }
size_t size() const { return sizeof(r); }
{
using BaseGdbRegCache::BaseGdbRegCache;
private:
- struct {
+ struct M5_ATTR_PACKED {
uint64_t x[31];
uint64_t spx;
uint64_t pc;
VecElem v[NumVecV8ArchRegs * NumVecElemPerNeonVecReg];
uint32_t fpsr;
uint32_t fpcr;
- } M5_ATTR_PACKED r;
+ } r;
public:
char *data() const { return (char *)&r; }
size_t size() const { return sizeof(r); }
// Cache clean operations require read permissions to the specified VA
bool is_write = !req->isCacheClean() && mode == Write;
bool is_atomic = req->isAtomic();
- bool is_priv M5_VAR_USED = isPriv && !(flags & UserMode);
+ M5_VAR_USED bool is_priv = isPriv && !(flags & UserMode);
updateMiscReg(tc, curTranType);
if operands.predRead:
myDict['op_decl'] += 'uint8_t _sourceIndex = 0;\n'
if operands.predWrite:
- myDict['op_decl'] += 'uint8_t M5_VAR_USED _destIndex = 0;\n'
+ myDict['op_decl'] += 'M5_VAR_USED uint8_t _destIndex = 0;\n'
is_src = lambda op: op.is_src
is_dest = lambda op: op.is_dest
{
assert(checkInterrupts());
- StatusReg M5_VAR_USED status = tc->readMiscRegNoEffect(MISCREG_STATUS);
- CauseReg M5_VAR_USED cause = tc->readMiscRegNoEffect(MISCREG_CAUSE);
+ M5_VAR_USED StatusReg status = tc->readMiscRegNoEffect(MISCREG_STATUS);
+ M5_VAR_USED CauseReg cause = tc->readMiscRegNoEffect(MISCREG_CAUSE);
DPRINTF(Interrupt, "Interrupt! IM[7:0]=%d IP[7:0]=%d \n",
(unsigned)status.im, (unsigned)cause.ip);
Fault %(class_name)s::execute(ExecContext *xc,
Trace::InstRecord *traceData) const
{
- Addr EA M5_VAR_USED = 0;
+ M5_VAR_USED Addr EA = 0;
Fault fault = NoFault;
%(fp_enable_check)s;
ExecContext *xc, Trace::InstRecord *traceData) const
{
Fault fault = NoFault;
- int64_t data M5_VAR_USED;
+ M5_VAR_USED int64_t data;
%(op_decl)s;
%(op_rd)s;
ExecContext *xc,
Trace::InstRecord *traceData) const
{
- Addr M5_VAR_USED EA;
+ M5_VAR_USED Addr EA;
Fault fault = NoFault;
%(op_decl)s;
namespace RiscvISA
{
-const std::array<const char *, NumMiscRegs> M5_VAR_USED MiscRegNames = {{
+M5_VAR_USED const std::array<const char *, NumMiscRegs> MiscRegNames = {{
[MISCREG_PRV] = "PRV",
[MISCREG_ISA] = "ISA",
[MISCREG_VENDORID] = "VENDORID",
// Constructor.
%(class_name)s(ExtMachInst machInst);
Fault execute(ExecContext *, Trace::InstRecord *) const override;
- Fault doFpOp(ExecContext *, Trace::InstRecord *) const M5_NO_INLINE;
+ M5_NO_INLINE Fault doFpOp(ExecContext *, Trace::InstRecord *) const;
};
}};
%(op_decl)s;
%(op_rd)s;
- RegVal result M5_VAR_USED;
+ M5_VAR_USED RegVal result;
if(%(cond_check)s)
{
%(op_decl)s;
%(op_rd)s;
- RegVal result M5_VAR_USED;
+ M5_VAR_USED RegVal result;
if(%(cond_check)s)
{
*/
namespace X86ISA
{
- const Request::FlagsType M5_VAR_USED SegmentFlagMask = mask(4);
+ M5_VAR_USED const Request::FlagsType SegmentFlagMask = mask(4);
const int FlagShift = 4;
enum FlagBit {
CPL0FlagBit = 1,
void write(std::ostream &bmp) const override;
private:
- struct FileHeader {
+ struct M5_ATTR_PACKED FileHeader {
unsigned char magic_number[2];
uint32_t size;
uint16_t reserved1;
uint16_t reserved2;
uint32_t offset;
- } M5_ATTR_PACKED;
+ };
- struct InfoHeaderV1 { /* Aka DIB header */
+ struct M5_ATTR_PACKED InfoHeaderV1 { /* Aka DIB header */
uint32_t Size;
uint32_t Width;
uint32_t Height;
uint32_t YPelsPerMeter;
uint32_t ClrUsed;
uint32_t ClrImportant;
- } M5_ATTR_PACKED;
+ };
- struct CompleteV1Header {
+ struct M5_ATTR_PACKED CompleteV1Header {
FileHeader file;
InfoHeaderV1 info;
- } M5_ATTR_PACKED;
+ };
- struct BmpPixel32 {
+ struct M5_ATTR_PACKED BmpPixel32 {
BmpPixel32 &operator=(const Pixel &rhs) {
red = rhs.red;
green = rhs.green;
uint8_t green;
uint8_t red;
uint8_t padding;
- } M5_ATTR_PACKED;
+ };
typedef BmpPixel32 PixelType;
// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
-#if defined(__GNUC__) // clang or gcc
-# define M5_VAR_USED __attribute__((unused))
-# define M5_ATTR_PACKED __attribute__ ((__packed__))
-# define M5_NO_INLINE __attribute__ ((__noinline__))
+
+/*
+ * Attributes that become standard in later versions of c++.
+ */
+
+// Use M5_FALLTHROUGH to mark when you're intentionally falling through from
+// one case to another in a switch statement.
+#if __has_cpp_attribute(fallthrough) // Standard in c++17.
+# define M5_FALLTHROUGH [[fallthrough]]
+#else
+// Not supported, so it's not necessary to avoid warnings.
+# define M5_FALLTHROUGH
+#endif
+
+// When the return value of a function should not be discarded, mark it with
+// M5_NODISCARD.
+#if __has_cpp_attribute(nodiscard) // Standard in c++17, with message in c++20.
+# define M5_NODISCARD [[nodiscard]]
+#else
+// Not supported, but it's optional so we can just omit it.
+# define M5_NODISCARD
+#endif
+
+// When a variable may purposefully not be used, for instance if it's only used
+// in debug statements which might be disabled, mark it with M5_VAR_USED.
+#if __has_cpp_attribute(maybe_unused) // Standard in c++17.
+# define M5_VAR_USED [[maybe_unused]]
+#elif defined(__GNUC__)
+// gcc and clang support a custom attribute which is essentially the same
+// thing.
+# define M5_VAR_USED [[gnu::unused]]
+#endif
+
+
+/*
+ * Compiler specific features.
+ */
+
+#if defined(__GNUC__) // clang or gcc.
+// Mark a structure as packed, so that no padding is added to its layout. This
+// padding might be added to, for instance, ensure certain fields have certain
+// alignment.
+# define M5_ATTR_PACKED [[gnu::packed]]
+
+// Prevent a function from being inlined.
+# define M5_NO_INLINE [[gnu::noinline]]
+
+// Set the visibility of a symbol.
+# define M5_PUBLIC [[gnu::visibility("default")]]
+# define M5_LOCAL [[gnu::visibility("hidden")]]
+
+// Marker for what should be an unreachable point in the code.
# define M5_UNREACHABLE __builtin_unreachable()
-# define M5_PUBLIC __attribute__ ((visibility ("default")))
-# define M5_LOCAL __attribute__ ((visibility ("hidden")))
+
+// To mark a branch condition as likely taken, wrap its condition with
+// M5_LIKELY. To mark it as likely not taken, wrap its condition with
+// M5_UNLIKELY. These can be replaced with the standard attributes [[likely]]
+// and [[unlikely]] in c++20, although the syntax is different enough that
+// we can't do that with direct substitution.
# define M5_LIKELY(cond) __builtin_expect(!!(cond), 1)
# define M5_UNLIKELY(cond) __builtin_expect(!!(cond), 0)
#endif
-#if defined(__clang__)
+// When a member variable may be unused, mark it with M5_CLASS_VAR_USED. This
+// needs to be limited to clang only since clang warns on these unused
+// variables, and g++ will actually warn if you use this attribute since it
+// won't do anything there.
+#if defined(__clang__) // clang only.
# define M5_CLASS_VAR_USED M5_VAR_USED
#else
# define M5_CLASS_VAR_USED
#endif
-// This can be removed once all compilers support C++17
-#if defined __has_cpp_attribute
- // Note: We must separate this if statement because GCC < 5.0 doesn't
- // support the function-like syntax in #if statements.
- #if __has_cpp_attribute(fallthrough)
- #define M5_FALLTHROUGH [[fallthrough]]
- #else
- #define M5_FALLTHROUGH
- #endif
-
- #if __has_cpp_attribute(nodiscard)
- #define M5_NODISCARD [[nodiscard]]
- #else
- #define M5_NODISCARD
- #endif
-#else
- // Unsupported (and no warning) on GCC < 7.
- #define M5_FALLTHROUGH
-
- #define M5_NODISCARD
-#endif
-
#endif // __BASE_COMPILER_HH__
"No loadable segments in '%s'. ELF file corrupted?\n",
imageData->filename());
- for (auto M5_VAR_USED &seg: image.segments())
+ for (M5_VAR_USED auto &seg: image.segments())
DPRINTFR(Loader, "%s\n", seg);
// We will actually read the sections when we need to load them
void write(std::ostream &png) const override;
private:
/** Png Pixel type: not containing padding */
- struct PngPixel24 {
+ struct M5_ATTR_PACKED PngPixel24 {
PngPixel24 &operator=(const Pixel &rhs) {
red = rhs.red;
green = rhs.green;
uint8_t red;
uint8_t green;
uint8_t blue;
- } M5_ATTR_PACKED;
+ };
/**
* Handle to resources used by libpng:
for (auto &g : statGroups) {
if (DTRACE(Stats)) {
- const SimObject M5_VAR_USED *so =
+ M5_VAR_USED const SimObject *so =
dynamic_cast<const SimObject *>(this);
DPRINTF(Stats, "%s: regStats in group %s\n",
so ? so->name() : "?",
ClientCutText = 6
};
- struct PixelFormat {
+ struct M5_ATTR_PACKED PixelFormat {
uint8_t bpp;
uint8_t depth;
uint8_t bigendian;
uint8_t greenshift;
uint8_t blueshift;
uint8_t padding[3];
- } M5_ATTR_PACKED;
+ };
- struct PixelFormatMessage {
+ struct M5_ATTR_PACKED PixelFormatMessage {
uint8_t type;
uint8_t padding[3];
PixelFormat px;
- } M5_ATTR_PACKED;
+ };
- struct PixelEncodingsMessage {
+ struct M5_ATTR_PACKED PixelEncodingsMessage {
uint8_t type;
uint8_t padding;
uint16_t num_encodings;
- } M5_ATTR_PACKED;
+ };
- struct FrameBufferUpdateReq {
+ struct M5_ATTR_PACKED FrameBufferUpdateReq {
uint8_t type;
uint8_t incremental;
uint16_t x;
uint16_t y;
uint16_t width;
uint16_t height;
- } M5_ATTR_PACKED;
+ };
- struct KeyEventMessage {
+ struct M5_ATTR_PACKED KeyEventMessage {
uint8_t type;
uint8_t down_flag;
uint8_t padding[2];
uint32_t key;
- } M5_ATTR_PACKED;
+ };
- struct PointerEventMessage {
+ struct M5_ATTR_PACKED PointerEventMessage {
uint8_t type;
uint8_t button_mask;
uint16_t x;
uint16_t y;
- } M5_ATTR_PACKED;
+ };
- struct ClientCutTextMessage {
+ struct M5_ATTR_PACKED ClientCutTextMessage {
uint8_t type;
uint8_t padding[3];
uint32_t length;
- } M5_ATTR_PACKED;
+ };
typedef VncInputParams Params;
VncInput(const Params *p);
{
assert(curState == WaitForProtocolVersion);
- size_t len M5_VAR_USED;
+ M5_VAR_USED size_t len;
char version_string[13];
// Null terminate the message so it's easier to work with
NormalPhase
};
- struct ServerInitMsg {
+ struct M5_ATTR_PACKED ServerInitMsg {
uint16_t fbWidth;
uint16_t fbHeight;
PixelFormat px;
uint32_t namelen;
char name[2]; // just to put M5 in here
- } M5_ATTR_PACKED;
+ };
- struct FrameBufferUpdate {
+ struct M5_ATTR_PACKED FrameBufferUpdate {
uint8_t type;
uint8_t padding;
uint16_t num_rects;
- } M5_ATTR_PACKED;
+ };
- struct FrameBufferRect {
+ struct M5_ATTR_PACKED FrameBufferRect {
uint16_t x;
uint16_t y;
uint16_t width;
uint16_t height;
int32_t encoding;
- } M5_ATTR_PACKED;
+ };
- struct ServerCutText {
+ struct M5_ATTR_PACKED ServerCutText {
uint8_t type;
uint8_t padding[3];
uint32_t length;
- } M5_ATTR_PACKED;
+ };
/** @} */
// data) is used to indicate that a segment has been accessed.
#define SEG_TYPE_BIT_ACCESSED 1
-struct FXSave
+struct M5_ATTR_PACKED FXSave
{
uint16_t fcw;
uint16_t fsw;
uint8_t xmm[16][16];
uint64_t reserved[12];
-} M5_ATTR_PACKED;
+};
static_assert(sizeof(FXSave) == 512, "Unexpected size of FXSave");
Fetch1::minorTraceResponseLine(const std::string &name,
Fetch1::FetchRequestPtr response) const
{
- const RequestPtr &request M5_VAR_USED = response->request;
+ M5_VAR_USED const RequestPtr &request = response->request;
if (response->packet && response->packet->isError()) {
MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
SimpleThread &thread = *port.cpu.threads[inst->id.threadId];
TheISA::PCState old_pc = thread.pcState();
ExecContext context(port.cpu, thread, port.execute, inst);
- Fault M5_VAR_USED fault = inst->translationFault;
+ M5_VAR_USED Fault fault = inst->translationFault;
// Give the instruction a chance to suppress a translation fault
inst->translationFault = inst->staticInst->initiateAcc(&context, nullptr);
{
port.numAccessesInDTLB--;
- unsigned int M5_VAR_USED expected_fragment_index =
+ M5_VAR_USED unsigned int expected_fragment_index =
numTranslatedFragments;
numInTranslationFragments--;
for (unsigned int fragment_index = 0; fragment_index < numFragments;
fragment_index++)
{
- bool M5_VAR_USED is_last_fragment = false;
+ M5_VAR_USED bool is_last_fragment = false;
if (fragment_addr == base_addr) {
/* First fragment */
// will be active.
_nextStatus = Active;
- const DynInstPtr &inst M5_VAR_USED = rob->readHeadInst(tid);
+ M5_VAR_USED const DynInstPtr &inst = rob->readHeadInst(tid);
DPRINTF(Commit,"[tid:%i] Instruction [sn:%llu] PC %s is head of"
" ROB and ready to commit\n",
// This comming request can be either load, store or atomic.
// Atomic request has a corresponding pointer to its atomic memory
// operation
- bool isAtomic M5_VAR_USED = !isLoad && amo_op;
+ M5_VAR_USED bool isAtomic = !isLoad && amo_op;
ThreadID tid = cpu->contextToThread(inst->contextId());
auto cacheLineSize = cpu->cacheLineSize();
} else {
// Otherwise make the instruction dependent on the store/barrier.
DPRINTF(MemDepUnit, "Adding to dependency list\n");
- for (auto M5_VAR_USED producing_store : producing_stores)
+ for (M5_VAR_USED auto producing_store : producing_stores)
DPRINTF(MemDepUnit, "\tinst PC %s is dependent on [sn:%lli].\n",
inst->pcState(), producing_store);
std::vector<bool> regScoreBoard;
/** The number of actual physical registers */
- unsigned M5_CLASS_VAR_USED numPhysRegs;
+ M5_CLASS_VAR_USED unsigned numPhysRegs;
public:
/** Constructs a scoreboard.
{
// We shouldn't have any outstanding requests when we resume from
// a drained system.
- for (const auto& ph M5_VAR_USED : predHist)
+ for (M5_VAR_USED const auto& ph : predHist)
assert(ph.empty());
}
// hardware transactional memory
SimpleExecContext *t_info = threadInfo[curThread];
- const bool is_htm_speculative M5_VAR_USED =
+ M5_VAR_USED const bool is_htm_speculative =
t_info->inHtmTransactionalState();
// received a response from the dcache: complete the load or store
if (cmd < percentReads) {
// start by ensuring there is a reference value if we have not
// seen this address before
- uint8_t M5_VAR_USED ref_data = 0;
+ M5_VAR_USED uint8_t ref_data = 0;
auto ref = referenceData.find(req->getPaddr());
if (ref == referenceData.end()) {
referenceData[req->getPaddr()] = 0;
DPRINTF(TraceCPUData, "Printing readyList:\n");
while (itr != readyList.end()) {
auto graph_itr = depGraph.find(itr->seqNum);
- GraphNode* node_ptr M5_VAR_USED = graph_itr->second;
+ M5_VAR_USED GraphNode* node_ptr = graph_itr->second;
DPRINTFR(TraceCPUData, "\t%lld(%s), %lld\n", itr->seqNum,
node_ptr->typeToStr(), itr->execTick);
itr++;
// If it is not an rob dependency then it must be a register dependency
// If the register dependency is not found, it violates an assumption
// and must be caught by assert.
- bool regdep_found M5_VAR_USED = removeRegDep(done_seq_num);
+ M5_VAR_USED bool regdep_found = removeRegDep(done_seq_num);
assert(regdep_found);
}
// Return true if the node is dependency free
const ContextID ctx = pkt->req->contextId();
const size_t data_sz = pkt->getSize();
- uint32_t pkt_data M5_VAR_USED;
+ M5_VAR_USED uint32_t pkt_data;
switch (data_sz)
{
case 1:
TranslContext context;
Tick recvTick;
- Tick M5_CLASS_VAR_USED faultTick;
+ M5_CLASS_VAR_USED Tick faultTick;
virtual void main(Yield &yield);
assert(pkt->getAddr() >= pioAddr && pkt->getAddr() < pioAddr + pioSize);
// TODO: How to get pid??
- Addr M5_VAR_USED daddr = pkt->getAddr() - pioAddr;
+ M5_VAR_USED Addr daddr = pkt->getAddr() - pioAddr;
DPRINTF(HSAPacketProcessor,
"%s: write of size %d to reg-offset %d (0x%x)\n",
HSAPacketProcessor::CmdQueueCmdDmaEvent::process()
{
uint32_t rl_idx = series_ctx->rl_idx;
- AQLRingBuffer *aqlRingBuffer M5_VAR_USED =
+ M5_VAR_USED AQLRingBuffer *aqlRingBuffer =
hsaPP->regdQList[rl_idx]->qCntxt.aqlBuf;
HSAQueueDescriptor* qDesc =
hsaPP->regdQList[rl_idx]->qCntxt.qDesc;
void
HSAPacketProcessor::displayQueueDescriptor(int pid, uint32_t rl_idx)
{
- HSAQueueDescriptor* M5_VAR_USED qDesc = regdQList[rl_idx]->qCntxt.qDesc;
+ M5_VAR_USED HSAQueueDescriptor* qDesc = regdQList[rl_idx]->qCntxt.qDesc;
DPRINTF(HSAPacketProcessor,
"%s: pid[%d], basePointer[0x%lx], dBPointer[0x%lx], "
"writeIndex[0x%x], readIndex[0x%x], size(bytes)[0x%x]\n",
// Check if this newly created queue can be directly mapped
// to registered queue list
- bool M5_VAR_USED register_q = mapQIfSlotAvlbl(queue_id, aql_buf, q_desc);
+ M5_VAR_USED bool register_q = mapQIfSlotAvlbl(queue_id, aql_buf, q_desc);
schedWakeup();
DPRINTF(HSAPacketProcessor,
"%s: offset = %p, qID = %d, is_regd = %s, AL size %d\n",
prepareRead(cpu, index);
- uint64_t value M5_VAR_USED = 0;
+ M5_VAR_USED uint64_t value = 0;
if (pkt->getSize() == 4) {
uint32_t reg = regData32(raddr);
pkt->setLE(reg);
TCPIface::~TCPIface()
{
- int M5_VAR_USED ret;
+ M5_VAR_USED int ret;
ret = close(sock);
assert(ret == 0);
///
if (size == sizeof(uint64_t)) {
- uint64_t val M5_VAR_USED = pkt->getLE<uint64_t>();
+ M5_VAR_USED uint64_t val = pkt->getLE<uint64_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint32_t)) {
- uint32_t val M5_VAR_USED = pkt->getLE<uint32_t>();
+ M5_VAR_USED uint32_t val = pkt->getLE<uint32_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint16_t)) {
- uint16_t val M5_VAR_USED = pkt->getLE<uint16_t>();
+ M5_VAR_USED uint16_t val = pkt->getLE<uint16_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else if (size == sizeof(uint8_t)) {
- uint8_t val M5_VAR_USED = pkt->getLE<uint8_t>();
+ M5_VAR_USED uint8_t val = pkt->getLE<uint8_t>();
DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
daddr, val);
} else {
typedef uint16_t Flags;
typedef uint16_t Index;
- struct Header {
+ struct M5_ATTR_PACKED Header {
Flags flags;
Index index;
- } M5_ATTR_PACKED;
+ };
VirtRing<T>(PortProxy &proxy, ByteOrder bo, uint16_t size) :
header{0, 0}, ring(size), _proxy(proxy), _base(0), byteOrder(bo)
* @note This needs to be changed if the supported feature set
* changes!
*/
- struct Config {
+ struct M5_ATTR_PACKED Config {
uint64_t capacity;
- } M5_ATTR_PACKED;
+ };
Config config;
/** @{
/** @} */
/** VirtIO block device request as sent by guest */
- struct BlkRequest {
+ struct M5_ATTR_PACKED BlkRequest {
RequestType type;
uint32_t reserved;
uint64_t sector;
- } M5_ATTR_PACKED;
+ };
/**
* Device read request.
* @note This needs to be changed if the multiport feature is
* announced!
*/
- struct Config {
+ struct M5_ATTR_PACKED Config {
uint16_t cols;
uint16_t rows;
- } M5_ATTR_PACKED;
+ };
/** Currently active configuration (host byte order) */
Config config;
typedef uint8_t P9MsgType;
typedef uint16_t P9Tag;
-struct P9MsgHeader {
+struct M5_ATTR_PACKED P9MsgHeader {
/** Length including header */
uint32_t len;
/** Message type */
P9MsgType type;
/** Message tag */
P9Tag tag;
-} M5_ATTR_PACKED;
+};
/** Convert p9 byte order (LE) to host byte order */
template <typename T> inline T
* @note The fields in this structure depend on the features
* exposed to the guest.
*/
- struct Config {
+ struct M5_ATTR_PACKED Config {
uint16_t len;
char tag[];
- } M5_ATTR_PACKED;
+ };
/** Currently active configuration (host byte order) */
std::unique_ptr<Config> config;
Tick
PciVirtIO::read(PacketPtr pkt)
{
- const unsigned M5_VAR_USED size(pkt->getSize());
+ M5_VAR_USED const unsigned size(pkt->getSize());
int bar;
Addr offset;
if (!getBAR(pkt->getAddr(), bar, offset))
Tick
PciVirtIO::write(PacketPtr pkt)
{
- const unsigned M5_VAR_USED size(pkt->getSize());
+ M5_VAR_USED const unsigned size(pkt->getSize());
int bar;
Addr offset;
if (!getBAR(pkt->getAddr(), bar, offset))
// set the wavefront context to have a pointer to this section of the LDS
w->ldsChunk = ldsChunk;
- int32_t refCount M5_VAR_USED =
+ M5_VAR_USED int32_t refCount =
lds.increaseRefCounter(w->dispatchId, w->wgId);
DPRINTF(GPUDisp, "CU%d: increase ref ctr wg[%d] to [%d]\n",
cu_id, w->wgId, refCount);
// this is for writeComplete callback
// we simply get decrement write-related wait counters
assert(gpuDynInst);
- Wavefront *w M5_VAR_USED =
+ M5_VAR_USED Wavefront *w =
computeUnit->wfList[gpuDynInst->simdId][gpuDynInst->wfSlotId];
assert(w);
DPRINTF(GPUExec, "WriteCompleteResp: WF[%d][%d] WV%d %s decrementing "
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front().first;
- GPUDynInstPtr gpuDynInst M5_VAR_USED = retries.front().second;
+ M5_VAR_USED GPUDynInstPtr gpuDynInst = retries.front().second;
DPRINTF(GPUMem, "CU%d: WF[%d][%d]: retry mem inst addr %#x\n",
computeUnit->cu_id, gpuDynInst->simdId, gpuDynInst->wfSlotId,
pkt->req->getPaddr());
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front().first;
- Wavefront *wavefront M5_VAR_USED = retries.front().second;
+ M5_VAR_USED Wavefront *wavefront = retries.front().second;
DPRINTF(GPUFetch, "CU%d: WF[%d][%d]: retrying FETCH addr %#x\n",
computeUnit->cu_id, wavefront->simdId, wavefront->wfSlotId,
pkt->req->getPaddr());
DTLBPort::SenderState *sender_state =
safe_cast<DTLBPort::SenderState*>(translation_state->saved);
- Wavefront *w M5_VAR_USED =
+ M5_VAR_USED Wavefront *w =
computeUnit->wfList[sender_state->_gpuDynInst->simdId]
[sender_state->_gpuDynInst->wfSlotId];
{
SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
- ComputeUnit *compute_unit M5_VAR_USED = computeUnit;
+ M5_VAR_USED ComputeUnit *compute_unit = computeUnit;
if (!(sendTimingReq(pkt))) {
retries.push_back(std::make_pair(pkt, gpuDynInst));
{
SenderState *sender_state = safe_cast<SenderState*>(pkt->senderState);
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
- ComputeUnit *compute_unit M5_VAR_USED = scalarDataPort.computeUnit;
+ M5_VAR_USED ComputeUnit *compute_unit = scalarDataPort.computeUnit;
if (!(scalarDataPort.sendTimingReq(pkt))) {
scalarDataPort.retries.push_back(pkt);
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front();
- Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
+ M5_VAR_USED Addr vaddr = pkt->req->getVaddr();
DPRINTF(GPUTLB, "CU%d: retrying D-translaton for address%#x", vaddr);
if (!sendTimingReq(pkt)) {
GPUDynInstPtr gpuDynInst = sender_state->_gpuDynInst;
delete pkt->senderState;
- Wavefront *w M5_VAR_USED = gpuDynInst->wavefront();
+ M5_VAR_USED Wavefront *w = gpuDynInst->wavefront();
DPRINTF(GPUTLB, "CU%d: WF[%d][%d][wv=%d]: scalar DTLB port received "
"translation: PA %#x -> %#x\n", computeUnit->cu_id, w->simdId,
bool
ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt)
{
- Addr line M5_VAR_USED = pkt->req->getPaddr();
+ M5_VAR_USED Addr line = pkt->req->getPaddr();
DPRINTF(GPUTLB, "CU%d: ITLBPort received %#x->%#x\n",
computeUnit->cu_id, pkt->req->getVaddr(), line);
for (int i = 0; i < len; ++i) {
PacketPtr pkt = retries.front();
- Addr vaddr M5_VAR_USED = pkt->req->getVaddr();
+ M5_VAR_USED Addr vaddr = pkt->req->getVaddr();
DPRINTF(GPUTLB, "CU%d: retrying I-translaton for address%#x", vaddr);
if (!sendTimingReq(pkt)) {
dynamic_cast<ComputeUnit::LDSPort::SenderState*>(pkt->senderState);
fatal_if(!sender_state, "packet without a valid sender state");
- GPUDynInstPtr gpuDynInst M5_VAR_USED = sender_state->getMemInst();
+ M5_VAR_USED GPUDynInstPtr gpuDynInst = sender_state->getMemInst();
if (isStalled()) {
fatal_if(retries.empty(), "must have retries waiting to be stalled");
// that we've reserved a global and local memory unit. Thus,
// we need to mark the latter execution unit as not available.
if (execUnitIds.size() > 1) {
- int lm_exec_unit M5_VAR_USED = wf->localMem;
+ M5_VAR_USED int lm_exec_unit = wf->localMem;
assert(toExecute.dispatchStatus(lm_exec_unit)
== SKIP);
}
// Verify the GM pipe for this wave is ready to execute
// and the wave in the GM pipe is the same as the wave
// in the LM pipe
- int gm_exec_unit M5_VAR_USED = wf->globalMem;
+ M5_VAR_USED int gm_exec_unit = wf->globalMem;
assert(wf->wfDynId == toExecute
.readyInst(gm_exec_unit)->wfDynId);
assert(toExecute.dispatchStatus(gm_exec_unit)
#include "sim/byteswap.hh"
#include "sim/system.hh"
-struct DmesgEntry {
+struct M5_ATTR_PACKED DmesgEntry {
uint64_t ts_nsec;
uint16_t len;
uint16_t text_len;
uint16_t dict_len;
uint8_t facility;
uint8_t flags;
-} M5_ATTR_PACKED;
+};
static int
dumpDmesgEntry(const uint8_t *base, const uint8_t *end, const ByteOrder bo,
if (matched) {
FILE *f = tmpfile();
int fd = fileno(f);
- size_t ret M5_VAR_USED = fwrite(data.c_str(), 1, data.size(), f);
+ M5_VAR_USED size_t ret = fwrite(data.c_str(), 1, data.size(), f);
assert(ret == data.size());
rewind(f);
return fd;
void
SkipFuncBase::process(ThreadContext *tc)
{
- TheISA::PCState oldPC M5_VAR_USED = tc->pcState();
+ M5_VAR_USED TheISA::PCState oldPC = tc->pcState();
returnFromFuncIn(tc);
DPRINTF(SimpleCache, "Copying data from new packet to old\n");
// We had to upgrade a previous packet. We can functionally deal with
// the cache access now. It better be a hit.
- bool hit M5_VAR_USED = accessFunctional(originalPacket);
+ M5_VAR_USED bool hit = accessFunctional(originalPacket);
panic_if(!hit, "Should always hit after inserting");
originalPacket->makeResponse();
delete pkt; // We may need to delay this, I'm not sure.
// the bigger block
// Get previous compressed size
- const std::size_t M5_VAR_USED prev_size = compression_blk->getSizeBits();
+ M5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
// Check if new data is co-allocatable
const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
if (cache->system->bypassCaches()) {
// Just forward the packet if caches are disabled.
// @todo This should really enqueue the packet rather
- bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
+ M5_VAR_USED bool success = cache->memSidePort.sendTimingReq(pkt);
assert(success);
return true;
} else if (tryTiming(pkt)) {
// this express snoop travels towards the memory, and at
// every crossbar it is snooped upwards thus reaching
// every cache in the system
- bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
+ M5_VAR_USED bool success = memSidePort.sendTimingReq(snoop_pkt);
// express snoops always succeed
assert(success);
// responds in atomic mode, so remember a few things about the
// original packet up front
bool invalidate = pkt->isInvalidate();
- bool M5_VAR_USED needs_writable = pkt->needsWritable();
+ M5_VAR_USED bool needs_writable = pkt->needsWritable();
// at the moment we could get an uncacheable write which does not
// have the invalidate flag, and we need a suitable way of dealing
// prefetchSquash first may result in the MSHR being
// prematurely deallocated.
if (snoop_pkt.cacheResponding()) {
- auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
+ M5_VAR_USED auto r = outstandingSnoop.insert(snoop_pkt.req);
assert(r.second);
// if we are getting a snoop response with no sharers it
FALRU::invalidate(CacheBlk *blk)
{
// Erase block entry reference in the hash table
- auto num_erased M5_VAR_USED =
+ M5_VAR_USED auto num_erased =
tagHash.erase(std::make_pair(blk->tag, blk->isSecure()));
// Sanity check; only one block reference should be erased
*memSidePorts[dest_port_id]);
}
- bool success M5_VAR_USED =
+ M5_VAR_USED bool success =
memSidePorts[dest_port_id]->sendTimingSnoopResp(pkt);
pktCount[cpu_side_port_id][dest_port_id]++;
pktSize[cpu_side_port_id][dest_port_id] += pkt_size;
// if this is the destination of the operation, the xbar
// sends the response to the cache clean operation only
// after having encountered the cache clean request
- auto M5_VAR_USED ret = outstandingCMO.emplace(pkt->id, nullptr);
+ M5_VAR_USED auto ret = outstandingCMO.emplace(pkt->id, nullptr);
// in atomic mode we know that the WriteClean packet should
// precede the clean request
assert(ret.second);
void
DRAMSim2Wrapper::enqueue(bool is_write, uint64_t addr)
{
- bool success M5_VAR_USED = dramsim->addTransaction(is_write, addr);
+ M5_VAR_USED bool success = dramsim->addTransaction(is_write, addr);
assert(success);
}
void
DRAMsim3Wrapper::enqueue(uint64_t addr, bool is_write)
{
- bool success M5_VAR_USED = dramsim->AddTransaction(addr, is_write);
+ M5_VAR_USED bool success = dramsim->AddTransaction(addr, is_write);
assert(success);
}
StubSlavePort::recvAtomic(PacketPtr packet)
{
if (DTRACE(ExternalPort)) {
- unsigned int M5_VAR_USED size = packet->getSize();
+ M5_VAR_USED unsigned int size = packet->getSize();
DPRINTF(ExternalPort, "StubSlavePort: recvAtomic a: 0x%x size: %d"
" data: ...\n", packet->getAddr(), size);
/**
* General timing requirements
*/
- const Tick M5_CLASS_VAR_USED tCK;
+ M5_CLASS_VAR_USED const Tick tCK;
const Tick tCS;
const Tick tBURST;
const Tick tRTW;
new_vaddr, size);
while (size > 0) {
- auto new_it M5_VAR_USED = pTable.find(new_vaddr);
+ M5_VAR_USED auto new_it = pTable.find(new_vaddr);
auto old_it = pTable.find(vaddr);
assert(old_it != pTable.end() && new_it == pTable.end());
for (vector<Router*>::const_iterator i= m_routers.begin();
i != m_routers.end(); ++i) {
Router* router = safe_cast<Router*>(*i);
- int router_id M5_VAR_USED =
+ M5_VAR_USED int router_id =
fault_model->declare_router(router->get_num_inports(),
router->get_num_outports(),
router->get_vc_per_vnet(),
private:
Router *m_router;
- int M5_CLASS_VAR_USED m_id;
+ M5_CLASS_VAR_USED int m_id;
PortDirection m_direction;
int m_vc_per_vnet;
NetworkLink *m_out_link;
{
PortDirection outport_dirn = "Unknown";
- int M5_VAR_USED num_rows = m_router->get_net_ptr()->getNumRows();
+ M5_VAR_USED int num_rows = m_router->get_net_ptr()->getNumRows();
int num_cols = m_router->get_net_ptr()->getNumCols();
assert(num_rows > 0 && num_cols > 0);
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
uint64_t warmedUpBlocks = 0;
- uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
+ M5_VAR_USED uint64_t totalBlocks = (uint64_t)m_cache_num_sets *
(uint64_t)m_cache_assoc;
for (int i = 0; i < m_cache_num_sets; i++) {
inline void
PerfectCacheMemory<ENTRY>::deallocate(Addr address)
{
- auto num_erased M5_VAR_USED = m_map.erase(makeLineAddress(address));
+ M5_VAR_USED auto num_erased = m_map.erase(makeLineAddress(address));
assert(num_erased == 1);
}
{
PacketPtr pkt = crequest->getFirstPkt();
Addr request_address = pkt->getAddr();
- Addr request_line_address M5_VAR_USED = makeLineAddress(request_address);
+ M5_VAR_USED Addr request_line_address = makeLineAddress(request_address);
RubyRequestType type = crequest->getRubyType();
if (it->contains(pkt->getAddr())) {
// generally it is not safe to assume success here as
// the port could be blocked
- bool M5_VAR_USED success =
+ M5_VAR_USED bool success =
ruby_port->request_ports[i]->sendTimingReq(pkt);
assert(success);
return true;
{
DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());
- RubyPort *rp M5_VAR_USED = static_cast<RubyPort *>(&owner);
+ M5_VAR_USED RubyPort *rp = static_cast<RubyPort *>(&owner);
RubySystem *rs = rp->m_ruby_system;
// Check for pio requests and directly send them to the dedicated
ranges.splice(ranges.begin(),
ruby_port->request_ports[i]->getAddrRanges());
}
- for (const auto M5_VAR_USED &r : ranges)
+ for (M5_VAR_USED const auto &r : ranges)
DPRINTF(RubyPort, "%s\n", r.to_string());
return ranges;
}
DPRINTF(RubySystem, "Functional Write request for %#x\n", addr);
- uint32_t M5_VAR_USED num_functional_writes = 0;
+ M5_VAR_USED uint32_t num_functional_writes = 0;
// Only send functional requests within the same network.
assert(requestorToNetwork.count(pkt->requestorId()));
code('''
{
// Declare message
- const $mtid* in_msg_ptr M5_VAR_USED;
+ M5_VAR_USED const $mtid* in_msg_ptr;
in_msg_ptr = dynamic_cast<const $mtid *>(($qcode).${{self.method}}());
if (in_msg_ptr == NULL) {
// If the cast fails, this is the wrong inport (wrong message type).
$c_ident::initNetQueues()
{
MachineType machine_type = string_to_MachineType("${{self.ident}}");
- int base M5_VAR_USED = MachineType_base_number(machine_type);
+ M5_VAR_USED int base = MachineType_base_number(machine_type);
''')
code.indent()
{
private:
/** Required for sensible debug messages.*/
- const M5_CLASS_VAR_USED SimObject *object;
+ M5_CLASS_VAR_USED const SimObject *object;
/** Vector for name look-up. */
std::vector<ProbePoint *> points;
{
auto &t = thread(id);
# if THE_ISA != NULL_ISA
- BaseCPU M5_VAR_USED *cpu = t.context->getCpuPtr();
+ M5_VAR_USED BaseCPU *cpu = t.context->getCpuPtr();
DPRINTFS(Quiesce, cpu, "quiesce()\n");
# endif
t.quiesce();
warn_once("Cache line size is neither 16, 32, 64 nor 128 bytes.\n");
// Get the generic system requestor IDs
- RequestorID tmp_id M5_VAR_USED;
+ M5_VAR_USED RequestorID tmp_id;
tmp_id = getRequestorId(this, "writebacks");
assert(tmp_id == Request::wbRequestorId);
tmp_id = getRequestorId(this, "functional");