/*
- * Copyright (c) 2012-2013 ARM Limited
+ * Copyright (c) 2012-2013,2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <climits>
#include "base/flags.hh"
-#include "base/misc.hh"
+#include "base/logging.hh"
#include "base/types.hh"
+#include "cpu/inst_seq.hh"
#include "sim/core.hh"
/**
class Request
{
public:
- typedef uint32_t FlagsType;
+ typedef uint64_t FlagsType;
typedef uint8_t ArchFlagsType;
typedef ::Flags<FlagsType> Flags;
- /**
- * Architecture specific flags.
- *
- * These bits int the flag field are reserved for
- * architecture-specific code. For example, SPARC uses them to
- * represent ASIs.
- */
- static const FlagsType ARCH_BITS = 0x000000FF;
- /** The request was an instruction fetch. */
- static const FlagsType INST_FETCH = 0x00000100;
- /** The virtual address is also the physical address. */
- static const FlagsType PHYSICAL = 0x00000200;
- /**
- * The request is to an uncacheable address.
- *
- * @note Uncacheable accesses may be reordered by CPU models. The
- * STRICT_ORDER flag should be set if such reordering is
- * undesirable.
- */
- static const FlagsType UNCACHEABLE = 0x00000400;
- /**
- * The request is required to be strictly ordered by <i>CPU
- * models</i> and is non-speculative.
- *
- * A strictly ordered request is guaranteed to never be re-ordered
- * or executed speculatively by a CPU model. The memory system may
- * still reorder requests in caches unless the UNCACHEABLE flag is
- * set as well.
- */
- static const FlagsType STRICT_ORDER = 0x00000800;
- /** This request is to a memory mapped register. */
- static const FlagsType MMAPPED_IPR = 0x00002000;
- /** This request is a clear exclusive. */
- static const FlagsType CLEAR_LL = 0x00004000;
- /** This request is made in privileged mode. */
- static const FlagsType PRIVILEGED = 0x00008000;
-
- /** This is a write that is targeted and zeroing an entire cache block.
- * There is no need for a read/modify/write
- */
- static const FlagsType CACHE_BLOCK_ZERO = 0x00010000;
-
- /** The request should not cause a memory access. */
- static const FlagsType NO_ACCESS = 0x00080000;
- /** This request will lock or unlock the accessed memory. When used with
- * a load, the access locks the particular chunk of memory. When used
- * with a store, it unlocks. The rule is that locked accesses have to be
- * made up of a locked load, some operation on the data, and then a locked
- * store.
- */
- static const FlagsType LOCKED_RMW = 0x00100000;
- /** The request is a Load locked/store conditional. */
- static const FlagsType LLSC = 0x00200000;
- /** This request is for a memory swap. */
- static const FlagsType MEM_SWAP = 0x00400000;
- static const FlagsType MEM_SWAP_COND = 0x00800000;
-
- /** The request is a prefetch. */
- static const FlagsType PREFETCH = 0x01000000;
- /** The request should be prefetched into the exclusive state. */
- static const FlagsType PF_EXCLUSIVE = 0x02000000;
- /** The request should be marked as LRU. */
- static const FlagsType EVICT_NEXT = 0x04000000;
-
- /** The request should be handled by the generic IPR code (only
- * valid together with MMAPPED_IPR) */
- static const FlagsType GENERIC_IPR = 0x08000000;
-
- /** The request targets the secure memory space. */
- static const FlagsType SECURE = 0x10000000;
- /** The request is a page table walk */
- static const FlagsType PT_WALK = 0x20000000;
-
- /** These flags are *not* cleared when a Request object is reused
- (assigned a new address). */
- static const FlagsType STICKY_FLAGS = INST_FETCH;
-
- /** Request Ids that are statically allocated
+ enum : FlagsType {
+ /**
+ * Architecture specific flags.
+ *
+ * These bits in the flag field are reserved for
+ * architecture-specific code. For example, SPARC uses them to
+ * represent ASIs.
+ */
+ ARCH_BITS = 0x000000FF,
+ /** The request was an instruction fetch. */
+ INST_FETCH = 0x00000100,
+ /** The virtual address is also the physical address. */
+ PHYSICAL = 0x00000200,
+ /**
+ * The request is to an uncacheable address.
+ *
+ * @note Uncacheable accesses may be reordered by CPU models. The
+ * STRICT_ORDER flag should be set if such reordering is
+ * undesirable.
+ */
+ UNCACHEABLE = 0x00000400,
+ /**
+ * The request is required to be strictly ordered by <i>CPU
+ * models</i> and is non-speculative.
+ *
+ * A strictly ordered request is guaranteed to never be
+ * re-ordered or executed speculatively by a CPU model. The
+ * memory system may still reorder requests in caches unless
+ * the UNCACHEABLE flag is set as well.
+ */
+ STRICT_ORDER = 0x00000800,
+ /** This request is to a memory mapped register. */
+ MMAPPED_IPR = 0x00002000,
+ /** This request is made in privileged mode. */
+ PRIVILEGED = 0x00008000,
+
+ /**
+ * This is a write that is targeted at zeroing an entire
+ * cache block, so there is no need for a read/modify/write.
+ */
+ CACHE_BLOCK_ZERO = 0x00010000,
+
+ /** The request should not cause a memory access. */
+ NO_ACCESS = 0x00080000,
+ /**
+ * This request will lock or unlock the accessed memory. When
+ * used with a load, the access locks the particular chunk of
+ * memory. When used with a store, it unlocks. The rule is
+ * that locked accesses have to be made up of a locked load,
+ * some operation on the data, and then a locked store.
+ */
+ LOCKED_RMW = 0x00100000,
+ /** The request is a Load locked/store conditional. */
+ LLSC = 0x00200000,
+ /** This request is for a memory swap. */
+ MEM_SWAP = 0x00400000,
+ MEM_SWAP_COND = 0x00800000,
+
+ /** The request is a prefetch. */
+ PREFETCH = 0x01000000,
+ /** The request should be prefetched into the exclusive state. */
+ PF_EXCLUSIVE = 0x02000000,
+ /** The request should be marked as LRU. */
+ EVICT_NEXT = 0x04000000,
+ /** The request should be marked with ACQUIRE. */
+ ACQUIRE = 0x00020000,
+ /** The request should be marked with RELEASE. */
+ RELEASE = 0x00040000,
+
+ /** The request is an atomic that returns data. */
+ ATOMIC_RETURN_OP = 0x40000000,
+ /** The request is an atomic that does not return data. */
+ ATOMIC_NO_RETURN_OP = 0x80000000,
+
+ /** The request should be marked with KERNEL.
+ * Used to indicate the synchronization associated with a GPU kernel
+ * launch or completion.
+ */
+ KERNEL = 0x00001000,
+
+ /**
+ * The request should be handled by the generic IPR code (only
+ * valid together with MMAPPED_IPR)
+ */
+ GENERIC_IPR = 0x08000000,
+
+ /** The request targets the secure memory space. */
+ SECURE = 0x10000000,
+ /** The request is a page table walk */
+ PT_WALK = 0x20000000,
+
+ /** The request invalidates a memory location */
+ INVALIDATE = 0x0000000100000000,
+ /** The request cleans a memory location */
+ CLEAN = 0x0000000200000000,
+
+ /** The request targets the point of unification */
+ DST_POU = 0x0000001000000000,
+
+ /** The request targets the point of coherence */
+ DST_POC = 0x0000002000000000,
+
+ /** Bits to define the destination of a request */
+ DST_BITS = 0x0000003000000000,
+
+ /**
+ * These flags are *not* cleared when a Request object is
+ * reused (assigned a new address).
+ */
+ STICKY_FLAGS = INST_FETCH
+ };
+ static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
+ CLEAN | INVALIDATE;
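+
+ /*
+ * Illustrative sketch only (not part of this header): flags combine
+ * bitwise, so a device access that must bypass the caches and must
+ * not be reordered by the CPU model could be built as
+ *
+ * Request req(paddr, size,
+ * Request::UNCACHEABLE | Request::STRICT_ORDER, mid);
+ *
+ * and a consumer can test for writes that carry no payload with
+ * req.getFlags().isSet(Request::STORE_NO_DATA). The names paddr,
+ * size, and mid are hypothetical.
+ */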
+
+ /** Master Ids that are statically allocated
* @{*/
- /** This request id is used for writeback requests by the caches */
- static const MasterID wbMasterId = 0;
- /** This request id is used for functional requests that don't come from a
- * particular device
- */
- static const MasterID funcMasterId = 1;
- /** This request id is used for message signaled interrupts */
- static const MasterID intMasterId = 2;
- /** Invalid request id for assertion checking only. It is invalid behavior
- * to ever send this id as part of a request.
- * @todo C++1x replace with numeric_limits when constexpr is added */
- static const MasterID invldMasterId = std::numeric_limits<MasterID>::max();
+ enum : MasterID {
+ /** This master id is used for writeback requests by the caches */
+ wbMasterId = 0,
+ /**
+ * This master id is used for functional requests that
+ * don't come from a particular device
+ */
+ funcMasterId = 1,
+ /** This master id is used for message signaled interrupts */
+ intMasterId = 2,
+ /**
+ * Invalid master id for assertion checking only. It is
+ * invalid behavior to ever send this id as part of a request.
+ */
+ invldMasterId = std::numeric_limits<MasterID>::max()
+ };
/** @} */
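+
+ /*
+ * Illustrative sketch only: a cache issuing a writeback for block
+ * blk_addr of blkSize bytes (hypothetical names) would use the
+ * statically allocated writeback master id:
+ *
+ * Request *req = new Request(blk_addr, blkSize, 0, wbMasterId);
+ */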
- /** Invalid or unknown Pid. Possible when operating system is not present
- * or has not assigned a pid yet */
- static const uint32_t invldPid = std::numeric_limits<uint32_t>::max();
+ typedef uint32_t MemSpaceConfigFlagsType;
+ typedef ::Flags<MemSpaceConfigFlagsType> MemSpaceConfigFlags;
+
+ enum : MemSpaceConfigFlagsType {
+ /** Has a synchronization scope been set? */
+ SCOPE_VALID = 0x00000001,
+ /** Access has Wavefront scope visibility */
+ WAVEFRONT_SCOPE = 0x00000002,
+ /** Access has Workgroup scope visibility */
+ WORKGROUP_SCOPE = 0x00000004,
+ /** Access has Device (e.g., GPU) scope visibility */
+ DEVICE_SCOPE = 0x00000008,
+ /** Access has System (e.g., CPU + GPU) scope visibility */
+ SYSTEM_SCOPE = 0x00000010,
+
+ /** Global Segment */
+ GLOBAL_SEGMENT = 0x00000020,
+ /** Group Segment */
+ GROUP_SEGMENT = 0x00000040,
+ /** Private Segment */
+ PRIVATE_SEGMENT = 0x00000080,
+ /** Kernarg Segment */
+ KERNARG_SEGMENT = 0x00000100,
+ /** Readonly Segment */
+ READONLY_SEGMENT = 0x00000200,
+ /** Spill Segment */
+ SPILL_SEGMENT = 0x00000400,
+ /** Arg Segment */
+ ARG_SEGMENT = 0x00000800,
+ };
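+
+ /*
+ * Illustrative sketch only: a GPU memory model would typically tag a
+ * request with both a synchronization scope and a segment, e.g.
+ *
+ * req->setMemSpaceConfigFlags(SCOPE_VALID | DEVICE_SCOPE |
+ * GLOBAL_SEGMENT);
+ *
+ * after the request has a valid address, since
+ * setMemSpaceConfigFlags() asserts that below.
+ */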
private:
typedef uint8_t PrivateFlagsType;
typedef ::Flags<PrivateFlagsType> PrivateFlags;
- /** Whether or not the size is valid. */
- static const PrivateFlagsType VALID_SIZE = 0x00000001;
- /** Whether or not paddr is valid (has been written yet). */
- static const PrivateFlagsType VALID_PADDR = 0x00000002;
- /** Whether or not the vaddr & asid are valid. */
- static const PrivateFlagsType VALID_VADDR = 0x00000004;
- /** Whether or not the pc is valid. */
- static const PrivateFlagsType VALID_PC = 0x00000010;
- /** Whether or not the context ID is valid. */
- static const PrivateFlagsType VALID_CONTEXT_ID = 0x00000020;
- static const PrivateFlagsType VALID_THREAD_ID = 0x00000040;
- /** Whether or not the sc result is valid. */
- static const PrivateFlagsType VALID_EXTRA_DATA = 0x00000080;
-
- /** These flags are *not* cleared when a Request object is reused
- (assigned a new address). */
- static const PrivateFlagsType STICKY_PRIVATE_FLAGS =
- VALID_CONTEXT_ID | VALID_THREAD_ID;
+ enum : PrivateFlagsType {
+ /** Whether or not the size is valid. */
+ VALID_SIZE = 0x00000001,
+ /** Whether or not paddr is valid (has been written yet). */
+ VALID_PADDR = 0x00000002,
+ /** Whether or not the vaddr & asid are valid. */
+ VALID_VADDR = 0x00000004,
+ /** Whether or not the instruction sequence number is valid. */
+ VALID_INST_SEQ_NUM = 0x00000008,
+ /** Whether or not the pc is valid. */
+ VALID_PC = 0x00000010,
+ /** Whether or not the context ID is valid. */
+ VALID_CONTEXT_ID = 0x00000020,
+ /** Whether or not the sc result is valid. */
+ VALID_EXTRA_DATA = 0x00000080,
+ /**
+ * These flags are *not* cleared when a Request object is reused
+ * (assigned a new address).
+ */
+ STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
+ };
private:
void
setPhys(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
{
- assert(size >= 0);
_paddr = paddr;
_size = size;
_time = time;
/** Flag structure for the request. */
Flags _flags;
+ /** Memory space configuration flag structure for the request. */
+ MemSpaceConfigFlags _memSpaceConfigFlags;
+
/** Private flags for field validity checking. */
PrivateFlags privateFlags;
* store conditional or the compare value for a CAS. */
uint64_t _extraData;
- /** The context ID (for statistics, typically). */
- int _contextId;
- /** The thread ID (id within this CPU) */
- ThreadID _threadId;
+ /** The context ID (for statistics, locks, and wakeups). */
+ ContextID _contextId;
/** program counter of initiating access; for tracing/debugging */
Addr _pc;
+ /** Sequence number of the instruction that creates the request */
+ InstSeqNum _reqInstSeqNum;
+
+ /** A pointer to an atomic operation */
+ AtomicOpFunctor *atomicOpFunctor;
+
public:
/**
Request()
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
- translateDelta(0), accessDelta(0), depth(0)
+ _extraData(0), _contextId(0), _pc(0),
+ _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
+ accessDelta(0), depth(0)
{}
+ Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
+ InstSeqNum seq_num, ContextID cid)
+ : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
+ _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
+ _extraData(0), _contextId(0), _pc(0),
+ _reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
+ accessDelta(0), depth(0)
+ {
+ setPhys(paddr, size, flags, mid, curTick());
+ setContext(cid);
+ privateFlags.set(VALID_INST_SEQ_NUM);
+ }
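+
+ /*
+ * Illustrative sketch only: a CPU model could tie a request to the
+ * instruction that generated it (inst->seqNum and inst->contextId()
+ * are hypothetical names):
+ *
+ * Request *req = new Request(paddr, size, flags, masterId,
+ * inst->seqNum, inst->contextId());
+ */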
+
/**
* Constructor for physical (e.g. device) requests. Initializes
* just physical address, size, flags, and timestamp (to curTick()).
Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
- translateDelta(0), accessDelta(0), depth(0)
+ _extraData(0), _contextId(0), _pc(0),
+ _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
+ accessDelta(0), depth(0)
{
setPhys(paddr, size, flags, mid, curTick());
}
Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
- translateDelta(0), accessDelta(0), depth(0)
+ _extraData(0), _contextId(0), _pc(0),
+ _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
+ accessDelta(0), depth(0)
{
setPhys(paddr, size, flags, mid, time);
}
Addr pc)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
- translateDelta(0), accessDelta(0), depth(0)
+ _extraData(0), _contextId(0), _pc(pc),
+ _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
+ accessDelta(0), depth(0)
{
setPhys(paddr, size, flags, mid, time);
privateFlags.set(VALID_PC);
- _pc = pc;
}
Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
- Addr pc, int cid, ThreadID tid)
+ Addr pc, ContextID cid)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
- translateDelta(0), accessDelta(0), depth(0)
+ _extraData(0), _contextId(0), _pc(0),
+ _reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
+ accessDelta(0), depth(0)
{
setVirt(asid, vaddr, size, flags, mid, pc);
- setThreadContext(cid, tid);
+ setContext(cid);
}
- ~Request() {}
+ Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
+ Addr pc, ContextID cid, AtomicOpFunctor *atomic_op)
+ : _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
+ _taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
+ _extraData(0), _contextId(0), _pc(0),
+ _reqInstSeqNum(0), atomicOpFunctor(atomic_op), translateDelta(0),
+ accessDelta(0), depth(0)
+ {
+ setVirt(asid, vaddr, size, flags, mid, pc);
+ setContext(cid);
+ }
+
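+ /**
+ * The destructor frees the atomic-op functor, i.e. a Request is
+ * assumed to take ownership of any functor it is given.
+ */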
+ ~Request()
+ {
+ if (hasAtomicOpFunctor()) {
+ delete atomicOpFunctor;
+ }
+ }
/**
- * Set up CPU and thread numbers.
+ * Set up the context ID.
*/
void
- setThreadContext(int context_id, ThreadID tid)
+ setContext(ContextID context_id)
{
_contextId = context_id;
- _threadId = tid;
- privateFlags.set(VALID_CONTEXT_ID|VALID_THREAD_ID);
+ privateFlags.set(VALID_CONTEXT_ID);
}
/**
return _time;
}
+ /**
+ * Accessor for atomic-op functor.
+ */
+ bool
+ hasAtomicOpFunctor()
+ {
+ return atomicOpFunctor != nullptr;
+ }
+
+ AtomicOpFunctor *
+ getAtomicOpFunctor()
+ {
+ assert(atomicOpFunctor != nullptr);
+ return atomicOpFunctor;
+ }
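+
+ /*
+ * Illustrative sketch only: the memory system applies the functor to
+ * the addressed bytes, e.g. (blk_data being a hypothetical pointer
+ * to the in-memory value):
+ *
+ * if (req->hasAtomicOpFunctor())
+ * (*req->getAtomicOpFunctor())(blk_data);
+ */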
+
/** Accessor for flags. */
Flags
getFlags()
}
/** Note that unlike other accessors, this function sets *specific
- flags* (ORs them in); it does not assign its argument to the
- _flags field. Thus this method should rightly be called
- setFlags() and not just flags(). */
+ flags* (ORs them in); it does not assign its argument to the
+ _flags field. Thus this method should rightly be called
+ setFlags() and not just flags(). */
void
setFlags(Flags flags)
{
_flags.set(flags);
}
+ void
+ setMemSpaceConfigFlags(MemSpaceConfigFlags extraFlags)
+ {
+ assert(privateFlags.isSet(VALID_PADDR | VALID_VADDR));
+ _memSpaceConfigFlags.set(extraFlags);
+ }
+
/** Accessor function for vaddr.*/
bool
hasVaddr() const
}
/** Accessor function for context ID.*/
- int
+ ContextID
contextId() const
{
assert(privateFlags.isSet(VALID_CONTEXT_ID));
return _contextId;
}
- /** Accessor function for thread ID. */
- ThreadID
- threadId() const
- {
- assert(privateFlags.isSet(VALID_THREAD_ID));
- return _threadId;
- }
-
void
setPC(Addr pc)
{
void setAccessLatency() { accessDelta = curTick() - _time - translateDelta; }
Tick getAccessLatency() const { return accessDelta; }
- /** Accessor functions for flags. Note that these are for testing
- only; setting flags should be done via setFlags(). */
+ /**
+ * Accessor for the sequence number of the instruction that creates
+ * the request.
+ */
+ bool
+ hasInstSeqNum() const
+ {
+ return privateFlags.isSet(VALID_INST_SEQ_NUM);
+ }
+
+ InstSeqNum
+ getReqInstSeqNum() const
+ {
+ assert(privateFlags.isSet(VALID_INST_SEQ_NUM));
+ return _reqInstSeqNum;
+ }
+
+ void
+ setReqInstSeqNum(const InstSeqNum seq_num)
+ {
+ privateFlags.set(VALID_INST_SEQ_NUM);
+ _reqInstSeqNum = seq_num;
+ }
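+
+ /*
+ * Illustrative sketch only: since the sequence number is only
+ * conditionally valid, reads should be guarded:
+ *
+ * if (req->hasInstSeqNum())
+ * record(req->getReqInstSeqNum()); // record() is hypothetical
+ */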
+
+ /** Accessor functions for flags. Note that these are for testing
+ only; setting flags should be done via setFlags(). */
bool isUncacheable() const { return _flags.isSet(UNCACHEABLE); }
bool isStrictlyOrdered() const { return _flags.isSet(STRICT_ORDER); }
bool isInstFetch() const { return _flags.isSet(INST_FETCH); }
bool isSwap() const { return _flags.isSet(MEM_SWAP|MEM_SWAP_COND); }
bool isCondSwap() const { return _flags.isSet(MEM_SWAP_COND); }
bool isMmappedIpr() const { return _flags.isSet(MMAPPED_IPR); }
- bool isClearLL() const { return _flags.isSet(CLEAR_LL); }
bool isSecure() const { return _flags.isSet(SECURE); }
bool isPTWalk() const { return _flags.isSet(PT_WALK); }
+ bool isAcquire() const { return _flags.isSet(ACQUIRE); }
+ bool isRelease() const { return _flags.isSet(RELEASE); }
+ bool isKernel() const { return _flags.isSet(KERNEL); }
+ bool isAtomicReturn() const { return _flags.isSet(ATOMIC_RETURN_OP); }
+ bool isAtomicNoReturn() const { return _flags.isSet(ATOMIC_NO_RETURN_OP); }
+
+ bool
+ isAtomic() const
+ {
+ return _flags.isSet(ATOMIC_RETURN_OP) ||
+ _flags.isSet(ATOMIC_NO_RETURN_OP);
+ }
+
+ /**
+ * Accessor functions for the destination of a memory request. The
+ * destination flag can specify a point of reference for the
+ * operation (e.g. a cache block clean to the point of
+ * unification). At the moment the destination is only used by
+ * cache maintenance operations.
+ */
+ bool isToPOU() const { return _flags.isSet(DST_POU); }
+ bool isToPOC() const { return _flags.isSet(DST_POC); }
+ Flags getDest() const { return _flags & DST_BITS; }
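+
+ /*
+ * Illustrative sketch only: DST_BITS masks out the destination, so a
+ * cache can decide where a maintenance operation stops with, e.g.,
+ *
+ * if (req->getDest() == Request::DST_POU) { ... }
+ */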
+
+ /**
+ * Accessor functions for the memory space configuration flags, used
+ * by GPU models such as those implementing the Heterogeneous System
+ * Architecture (HSA). Note that these are for testing only; setting
+ * extraFlags should be done via setMemSpaceConfigFlags().
+ */
+ bool isScoped() const { return _memSpaceConfigFlags.isSet(SCOPE_VALID); }
+
+ bool
+ isWavefrontScope() const
+ {
+ assert(isScoped());
+ return _memSpaceConfigFlags.isSet(WAVEFRONT_SCOPE);
+ }
+
+ bool
+ isWorkgroupScope() const
+ {
+ assert(isScoped());
+ return _memSpaceConfigFlags.isSet(WORKGROUP_SCOPE);
+ }
+
+ bool
+ isDeviceScope() const
+ {
+ assert(isScoped());
+ return _memSpaceConfigFlags.isSet(DEVICE_SCOPE);
+ }
+
+ bool
+ isSystemScope() const
+ {
+ assert(isScoped());
+ return _memSpaceConfigFlags.isSet(SYSTEM_SCOPE);
+ }
+
+ bool
+ isGlobalSegment() const
+ {
+ return _memSpaceConfigFlags.isSet(GLOBAL_SEGMENT) ||
+ (!isGroupSegment() && !isPrivateSegment() &&
+ !isKernargSegment() && !isReadonlySegment() &&
+ !isSpillSegment() && !isArgSegment());
+ }
+
+ bool
+ isGroupSegment() const
+ {
+ return _memSpaceConfigFlags.isSet(GROUP_SEGMENT);
+ }
+
+ bool
+ isPrivateSegment() const
+ {
+ return _memSpaceConfigFlags.isSet(PRIVATE_SEGMENT);
+ }
+
+ bool
+ isKernargSegment() const
+ {
+ return _memSpaceConfigFlags.isSet(KERNARG_SEGMENT);
+ }
+
+ bool
+ isReadonlySegment() const
+ {
+ return _memSpaceConfigFlags.isSet(READONLY_SEGMENT);
+ }
+
+ bool
+ isSpillSegment() const
+ {
+ return _memSpaceConfigFlags.isSet(SPILL_SEGMENT);
+ }
+
+ bool
+ isArgSegment() const
+ {
+ return _memSpaceConfigFlags.isSet(ARG_SEGMENT);
+ }
+
+ /**
+ * Accessor functions to determine whether this request is part of
+ * a cache maintenance operation. At the moment three operations
+ * are supported:
+ *
+ * 1) A cache clean operation updates all copies of a memory
+ * location to the point of reference,
+ * 2) A cache invalidate operation invalidates all copies of the
+ * specified block in the memory above the point of reference,
+ * 3) A clean and invalidate operation is a combination of the two
+ * operations.
+ * @{ */
+ bool isCacheClean() const { return _flags.isSet(CLEAN); }
+ bool isCacheInvalidate() const { return _flags.isSet(INVALIDATE); }
+ bool isCacheMaintenance() const { return _flags.isSet(CLEAN|INVALIDATE); }
+ /** @} */
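+
+ /*
+ * Illustrative sketch only: the three operations are distinguished
+ * by which of CLEAN and INVALIDATE are set:
+ *
+ * req->isCacheClean() && !req->isCacheInvalidate() // clean
+ * !req->isCacheClean() && req->isCacheInvalidate() // invalidate
+ * req->isCacheClean() && req->isCacheInvalidate() // clean+invalidate
+ */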
};
#endif // __MEM_REQUEST_HH__