/*
+ * Copyright (c) 2012-2018 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
+ * Andreas Sandberg
*/
/** @file
* Definitions of a simple cache block class.
*/
-#ifndef __CACHE_BLK_HH__
-#define __CACHE_BLK_HH__
+#ifndef __MEM_CACHE_BLK_HH__
+#define __MEM_CACHE_BLK_HH__
+#include <cassert>
+#include <cstdint>
+#include <iosfwd>
#include <list>
+#include <string>
#include "base/printable.hh"
-#include "sim/core.hh" // for Tick
-#include "arch/isa_traits.hh" // for Addr
+#include "base/types.hh"
+#include "mem/cache/replacement_policies/base.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
/**
* Cache block status bit assignments
*/
-enum CacheBlkStatusBits {
+enum CacheBlkStatusBits : unsigned {
/** valid, readable */
BlkValid = 0x01,
/** write permission */
BlkReadable = 0x04,
/** dirty (modified) */
BlkDirty = 0x08,
- /** block was referenced */
- BlkReferenced = 0x10,
    /** block was a hardware prefetch yet unaccessed */
- BlkHWPrefetched = 0x20
+ BlkHWPrefetched = 0x20,
+ /** block holds data from the secure memory space */
+ BlkSecure = 0x40,
};
/**
* A Basic Cache block.
* Contains the tag, status, and a pointer to data.
*/
-class CacheBlk
+class CacheBlk : public ReplaceableEntry
{
public:
- /** The address space ID of this block. */
- int asid;
+ /** Task Id associated with this block */
+ uint32_t task_id;
+
/** Data block tag value. */
Addr tag;
/**
* referenced by this block.
*/
uint8_t *data;
- /** the number of bytes stored in this block. */
- int size;
/** block state: OR of CacheBlkStatusBit */
typedef unsigned State;
/** The current status of this block. @sa CacheBlockStatusBits */
State status;
- /** Which curTick() will this block be accessable */
+ /** Which curTick() will this block be accessible */
Tick whenReady;
/**
- * The set this block belongs to.
+ * The set and way this block belongs to.
* @todo Move this into subclasses when we fix CacheTags to use them.
*/
- int set;
-
- /** whether this block has been touched */
- bool isTouched;
+ int set, way;
/** Number of references to this block since it was brought in. */
- int refCount;
+ unsigned refCount;
- /** holds the context source ID of the requestor for this block. */
- int contextSrc;
+ /** holds the source requestor ID for this block. */
+ int srcMasterId;
+
+ /** Tick on which the block was inserted in the cache. */
+ Tick tickInserted;
protected:
/**
*/
class Lock {
public:
- int contextId; // locking context
+ ContextID contextId; // locking context
+ Addr lowAddr; // low address of lock range
+ Addr highAddr; // high address of lock range
- // check for matching execution context
- bool matchesContext(Request *req)
+ // check for matching execution context, and an address that
+ // is within the lock
+ bool matches(const RequestPtr &req) const
{
- return (contextId == req->contextId());
+ Addr req_low = req->getPaddr();
+ Addr req_high = req_low + req->getSize() -1;
+ return (contextId == req->contextId()) &&
+ (req_low >= lowAddr) && (req_high <= highAddr);
+ }
+
+ // check if a request is intersecting and thus invalidating the lock
+ bool intersects(const RequestPtr &req) const
+ {
+ Addr req_low = req->getPaddr();
+ Addr req_high = req_low + req->getSize() - 1;
+
+ return (req_low <= highAddr) && (req_high >= lowAddr);
}
- Lock(Request *req)
- : contextId(req->contextId())
+ Lock(const RequestPtr &req)
+ : contextId(req->contextId()),
+ lowAddr(req->getPaddr()),
+ highAddr(lowAddr + req->getSize() - 1)
{
}
};
std::list<Lock> lockList;
public:
-
- CacheBlk()
- : asid(-1), tag(0), data(0) ,size(0), status(0), whenReady(0),
- set(-1), isTouched(false), refCount(0), contextSrc(-1)
- {}
-
- /**
- * Copy the state of the given block into this one.
- * @param rhs The block to copy.
- * @return a const reference to this block.
- */
- const CacheBlk& operator=(const CacheBlk& rhs)
+ CacheBlk() : data(nullptr)
{
- asid = rhs.asid;
- tag = rhs.tag;
- data = rhs.data;
- size = rhs.size;
- status = rhs.status;
- whenReady = rhs.whenReady;
- set = rhs.set;
- refCount = rhs.refCount;
- return *this;
+ invalidate();
}
+ CacheBlk(const CacheBlk&) = delete;
+ CacheBlk& operator=(const CacheBlk&) = delete;
+ virtual ~CacheBlk() {};
+
/**
* Checks the write permissions of this block.
* @return True if the block is writable.
}
/**
- * Check to see if a block has been written.
- * @return True if the block is dirty.
+ * Invalidate the block and clear all state.
*/
- bool isDirty() const
+ virtual void invalidate()
{
- return (status & BlkDirty) != 0;
+ tag = MaxAddr;
+ task_id = ContextSwitchTaskId::Unknown;
+ status = 0;
+ whenReady = MaxTick;
+ refCount = 0;
+ srcMasterId = Request::invldMasterId;
+ tickInserted = MaxTick;
+ lockList.clear();
}
/**
- * Check if this block has been referenced.
- * @return True if the block has been referenced.
+ * Check to see if a block has been written.
+ * @return True if the block is dirty.
*/
- bool isReferenced() const
+ bool isDirty() const
{
- return (status & BlkReferenced) != 0;
+ return (status & BlkDirty) != 0;
}
/**
}
/**
- * Track the fact that a local locked was issued to the block. If
- * multiple LLs get issued from the same context we could have
- * redundant records on the list, but that's OK, as they'll all
- * get blown away at the next store.
+ * Check if this block holds data from the secure memory space.
+ * @return True if the block holds data from the secure memory space.
+ */
+ bool isSecure() const
+ {
+ return (status & BlkSecure) != 0;
+ }
+
+ /**
+     * Set member variables when a block insertion occurs. Resets reference
+     * count to 1 (the insertion counts as a reference), and touches the
+     * block if it hadn't been touched previously. Sets the insertion tick
+     * to the current tick. Does not make the block valid.
+ *
+ * @param tag Block address tag.
+ * @param is_secure Whether the block is in secure space or not.
+ * @param src_master_ID The source requestor ID.
+ * @param task_ID The new task ID.
+ */
+ virtual void insert(const Addr tag, const bool is_secure,
+ const int src_master_ID, const uint32_t task_ID);
+
+ /**
+     * Track the fact that a load-locked (LL) was issued to the
+     * block. Invalidate any previous LL to the same address.
*/
void trackLoadLocked(PacketPtr pkt)
{
assert(pkt->isLLSC());
- lockList.push_front(Lock(pkt->req));
+ auto l = lockList.begin();
+ while (l != lockList.end()) {
+ if (l->intersects(pkt->req))
+ l = lockList.erase(l);
+ else
+ ++l;
+ }
+
+ lockList.emplace_front(pkt->req);
+ }
+
+ /**
+     * Clear any load locks that intersect the request and are from
+     * a different context.
+ */
+ void clearLoadLocks(const RequestPtr &req)
+ {
+ auto l = lockList.begin();
+ while (l != lockList.end()) {
+ if (l->intersects(req) && l->contextId != req->contextId()) {
+ l = lockList.erase(l);
+ } else {
+ ++l;
+ }
+ }
}
/**
- * Clear the list of valid load locks. Should be called whenever
- * block is written to or invalidated.
+ * Pretty-print a tag, and interpret state bits to readable form
+ * including mapping to a MOESI state.
+ *
+ * @return string with basic state information
*/
- void clearLoadLocks() { lockList.clear(); }
+ std::string print() const
+ {
+ /**
+ * state M O E S I
+ * writable 1 0 1 0 0
+ * dirty 1 1 0 0 0
+ * valid 1 1 1 1 0
+ *
+ * state writable dirty valid
+ * M 1 1 1
+ * O 0 1 1
+ * E 1 0 1
+ * S 0 0 1
+ * I 0 0 0
+ *
+ * Note that only one cache ever has a block in Modified or
+ * Owned state, i.e., only one cache owns the block, or
+ * equivalently has the BlkDirty bit set. However, multiple
+ * caches on the same path to memory can have a block in the
+ * Exclusive state (despite the name). Exclusive means this
+ * cache has the only copy at this level of the hierarchy,
+ * i.e., there may be copies in caches above this cache (in
+ * various states), but there are no peers that have copies on
+ * this branch of the hierarchy, and no caches at or above
+ * this level on any other branch have copies either.
+ **/
+ unsigned state = isWritable() << 2 | isDirty() << 1 | isValid();
+ char s = '?';
+ switch (state) {
+ case 0b111: s = 'M'; break;
+ case 0b011: s = 'O'; break;
+ case 0b101: s = 'E'; break;
+ case 0b001: s = 'S'; break;
+ case 0b000: s = 'I'; break;
+ default: s = 'T'; break; // @TODO add other types
+ }
+ return csprintf("state: %x (%c) valid: %d writable: %d readable: %d "
+ "dirty: %d tag: %x", status, s, isValid(),
+ isWritable(), isReadable(), isDirty(), tag);
+ }
/**
* Handle interaction of load-locked operations and stores.
*/
bool checkWrite(PacketPtr pkt)
{
- Request *req = pkt->req;
+ assert(pkt->isWrite());
+
+ // common case
+ if (!pkt->isLLSC() && lockList.empty())
+ return true;
+
+ const RequestPtr &req = pkt->req;
+
if (pkt->isLLSC()) {
// it's a store conditional... have to check for matching
// load locked.
bool success = false;
- for (std::list<Lock>::iterator i = lockList.begin();
- i != lockList.end(); ++i)
- {
- if (i->matchesContext(req)) {
- // it's a store conditional, and as far as the memory
- // system can tell, the requesting context's lock is
- // still valid.
+ auto l = lockList.begin();
+ while (!success && l != lockList.end()) {
+ if (l->matches(pkt->req)) {
+ // it's a store conditional, and as far as the
+ // memory system can tell, the requesting
+ // context's lock is still valid.
success = true;
- break;
+ lockList.erase(l);
+ } else {
+ ++l;
}
}
req->setExtraData(success ? 1 : 0);
- clearLoadLocks();
+ // clear any intersected locks from other contexts (our LL
+ // should already have cleared them)
+ clearLoadLocks(req);
return success;
} else {
- // for *all* stores (conditional or otherwise) we have to
- // clear the list of load-locks as they're all invalid now.
- clearLoadLocks();
+ // a normal write, if there is any lock not from this
+ // context we clear the list, thus for a private cache we
+ // never clear locks on normal writes
+ clearLoadLocks(req);
return true;
}
}
};
+/**
+ * Special instance of CacheBlk for use with tempBlk that deals with its
+ * block address regeneration.
+ * @sa Cache
+ */
+class TempCacheBlk final : public CacheBlk
+{
+  private:
+    /**
+     * Copy of the block's full address, used to regenerate tempBlock's
+     * address (a plain tag is not enough to rebuild it).
+     */
+    Addr _addr;
+
+  public:
+    /**
+     * Creates a temporary cache block, with its own storage.
+     * Unlike a regular CacheBlk, this block owns its data array.
+     * @param size The size (in bytes) of this cache block.
+     */
+    TempCacheBlk(unsigned size) : CacheBlk()
+    {
+        data = new uint8_t[size];
+    }
+    TempCacheBlk(const TempCacheBlk&) = delete;
+    TempCacheBlk& operator=(const TempCacheBlk&) = delete;
+    // Releases the storage allocated in the constructor.
+    ~TempCacheBlk() { delete [] data; };
+
+    /**
+     * Invalidate the block and clear all state, including the saved
+     * address (reset to MaxAddr).
+     */
+    void invalidate() override {
+        CacheBlk::invalidate();
+
+        _addr = MaxAddr;
+    }
+
+    /**
+     * Insert the block: record its full address and secure state only.
+     * Note this deliberately does NOT call CacheBlk::insert — reference
+     * count, task id, and insertion tick are untouched, and the block is
+     * not marked valid (status becomes exactly BlkSecure or 0).
+     *
+     * @param addr Full block address (not a tag, unlike the base class).
+     * @param is_secure Whether the block is in secure space or not.
+     * @param src_master_ID Ignored; present only to match the base
+     *                      interface.
+     * @param task_ID Ignored; present only to match the base interface.
+     */
+    void insert(const Addr addr, const bool is_secure,
+                const int src_master_ID=0, const uint32_t task_ID=0) override
+    {
+        // Set block address
+        _addr = addr;
+
+        // Set secure state
+        if (is_secure) {
+            status = BlkSecure;
+        } else {
+            status = 0;
+        }
+    }
+
+    /**
+     * Get block's address.
+     *
+     * @return The full address recorded at insertion (MaxAddr if the
+     *         block has been invalidated).
+     */
+    Addr getAddr() const
+    {
+        return _addr;
+    }
+};
+
/**
* Simple class to provide virtual print() method on cache blocks
* without allocating a vtable pointer for every single cache block.
const std::string &prefix = "") const;
};
-
-
-#endif //__CACHE_BLK_HH__
+#endif //__MEM_CACHE_BLK_HH__