/*
- * Copyright (c) 2012-2015 ARM Limited
+ * Copyright (c) 2012-2018 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
#ifndef __MEM_CACHE_BLK_HH__
#define __MEM_CACHE_BLK_HH__
+#include <cassert>
+#include <cstdint>
+#include <iosfwd>
#include <list>
+#include <string>
#include "base/printable.hh"
+#include "base/types.hh"
+#include "mem/cache/replacement_policies/base.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
* A Basic Cache block.
* Contains the tag, status, and a pointer to data.
*/
-class CacheBlk
+class CacheBlk : public ReplaceableEntry
{
public:
/** Task Id associated with this block */
uint32_t task_id;
- /** The address space ID of this block. */
- int asid;
/** Data block tag value. */
Addr tag;
/**
* referenced by this block.
*/
uint8_t *data;
- /** the number of bytes stored in this block. */
- unsigned size;
/** block state: OR of CacheBlkStatusBit */
typedef unsigned State;
/** The current status of this block. @sa CacheBlockStatusBits */
State status;
- /** Which curTick() will this block be accessable */
+ /** Which curTick() will this block be accessible */
Tick whenReady;
/**
*/
int set, way;
- /** whether this block has been touched */
- bool isTouched;
-
/** Number of references to this block since it was brought in. */
unsigned refCount;
/** holds the source requestor ID for this block. */
int srcMasterId;
+ /** Tick on which the block was inserted in the cache. */
Tick tickInserted;
protected:
*/
class Lock {
public:
- int contextId; // locking context
+ ContextID contextId; // locking context
Addr lowAddr; // low address of lock range
Addr highAddr; // high address of lock range
- // check for matching execution context
- bool matchesContext(const RequestPtr req) const
+ // check for matching execution context, and an address that
+ // is within the lock
+ bool matches(const RequestPtr &req) const
{
Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() -1;
(req_low >= lowAddr) && (req_high <= highAddr);
}
- bool overlapping(const RequestPtr req) const
+ // check if a request is intersecting and thus invalidating the lock
+ bool intersects(const RequestPtr &req) const
{
Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() - 1;
return (req_low <= highAddr) && (req_high >= lowAddr);
}
- Lock(const RequestPtr req)
+ Lock(const RequestPtr &req)
: contextId(req->contextId()),
lowAddr(req->getPaddr()),
highAddr(lowAddr + req->getSize() - 1)
std::list<Lock> lockList;
public:
-
- CacheBlk()
- : task_id(ContextSwitchTaskId::Unknown),
- asid(-1), tag(0), data(0) ,size(0), status(0), whenReady(0),
- set(-1), way(-1), isTouched(false), refCount(0),
- srcMasterId(Request::invldMasterId),
- tickInserted(0)
- {}
+ CacheBlk() : data(nullptr)
+ {
+ invalidate();
+ }
CacheBlk(const CacheBlk&) = delete;
CacheBlk& operator=(const CacheBlk&) = delete;
+ virtual ~CacheBlk() {};
/**
* Checks the write permissions of this block.
/**
* Invalidate the block and clear all state.
*/
- void invalidate()
+ virtual void invalidate()
{
+ tag = MaxAddr;
+ task_id = ContextSwitchTaskId::Unknown;
status = 0;
- isTouched = false;
- clearLoadLocks();
+ whenReady = MaxTick;
+ refCount = 0;
+ srcMasterId = Request::invldMasterId;
+ tickInserted = MaxTick;
+ lockList.clear();
}
/**
}
/**
- * Track the fact that a local locked was issued to the block. If
- * multiple LLs get issued from the same context we could have
- * redundant records on the list, but that's OK, as they'll all
- * get blown away at the next store.
+ * Set member variables when a block insertion occurs. Resets the
+ * reference count to 1 (the insertion counts as a reference), and
+ * touches the block if it hadn't been touched previously. Sets the
+ * insertion tick to the current tick. Does not make the block valid.
+ *
+ * @param tag Block address tag.
+ * @param is_secure Whether the block is in secure space or not.
+ * @param src_master_ID The source requestor ID.
+ * @param task_ID The new task ID.
+ */
+ virtual void insert(const Addr tag, const bool is_secure,
+ const int src_master_ID, const uint32_t task_ID);
+
+ /**
+ * Track the fact that a load-locked (LL) was issued to the
+ * block. Invalidate any previous LL to the same address.
*/
void trackLoadLocked(PacketPtr pkt)
{
assert(pkt->isLLSC());
+ auto l = lockList.begin();
+ while (l != lockList.end()) {
+ if (l->intersects(pkt->req))
+ l = lockList.erase(l);
+ else
+ ++l;
+ }
+
lockList.emplace_front(pkt->req);
}
/**
- * Clear the list of valid load locks. Should be called whenever
- * block is written to or invalidated.
+ * Clear any load locks that intersect the request and are from
+ * a different context.
*/
- void clearLoadLocks(RequestPtr req = nullptr)
+ void clearLoadLocks(const RequestPtr &req)
{
- if (!req) {
- // No request, invaldate all locks to this line
- lockList.clear();
- } else {
- // Only invalidate locks that overlap with this request
- auto lock_itr = lockList.begin();
- while (lock_itr != lockList.end()) {
- if (lock_itr->overlapping(req)) {
- lock_itr = lockList.erase(lock_itr);
- } else {
- ++lock_itr;
- }
+ auto l = lockList.begin();
+ while (l != lockList.end()) {
+ if (l->intersects(req) && l->contextId != req->contextId()) {
+ l = lockList.erase(l);
+ } else {
+ ++l;
}
}
}
/**
* Pretty-print a tag, and interpret state bits to readable form
- * including mapping to a MOESI stat.
+ * including mapping to a MOESI state.
*
* @return string with basic state information
*/
* E 1 0 1
* S 0 0 1
* I 0 0 0
+ *
+ * Note that only one cache ever has a block in Modified or
+ * Owned state, i.e., only one cache owns the block, or
+ * equivalently has the BlkDirty bit set. However, multiple
+ * caches on the same path to memory can have a block in the
+ * Exclusive state (despite the name). Exclusive means this
+ * cache has the only copy at this level of the hierarchy,
+ * i.e., there may be copies in caches above this cache (in
+ * various states), but there are no peers that have copies on
+ * this branch of the hierarchy, and no caches at or above
+ * this level on any other branch have copies either.
**/
unsigned state = isWritable() << 2 | isDirty() << 1 | isValid();
char s = '?';
*/
bool checkWrite(PacketPtr pkt)
{
+ assert(pkt->isWrite());
+
// common case
if (!pkt->isLLSC() && lockList.empty())
return true;
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
if (pkt->isLLSC()) {
// it's a store conditional... have to check for matching
// load locked.
bool success = false;
- for (const auto& l : lockList) {
- if (l.matchesContext(req)) {
- // it's a store conditional, and as far as the memory
- // system can tell, the requesting context's lock is
- // still valid.
+ auto l = lockList.begin();
+ while (!success && l != lockList.end()) {
+ if (l->matches(pkt->req)) {
+ // it's a store conditional, and as far as the
+ // memory system can tell, the requesting
+ // context's lock is still valid.
success = true;
- break;
+ lockList.erase(l);
+ } else {
+ ++l;
}
}
req->setExtraData(success ? 1 : 0);
+ // clear any intersecting locks from other contexts (our own
+ // LL should already have cleared them)
clearLoadLocks(req);
return success;
} else {
- // for *all* stores (conditional or otherwise) we have to
- // clear the list of load-locks as they're all invalid now.
+ // a normal write: clear any locks from other contexts that
+ // intersect this write, so for a private cache we never
+ // clear locks on normal writes
clearLoadLocks(req);
return true;
}
}
};
+/**
+ * Special instance of CacheBlk for use with tempBlk that deals with its
+ * block address regeneration.
+ * @sa Cache
+ */
+class TempCacheBlk final : public CacheBlk
+{
+ private:
+ /**
+ * Copy of the block's address, used to regenerate tempBlock's address.
+ */
+ Addr _addr;
+
+ public:
+ /**
+ * Creates a temporary cache block, with its own storage.
+ * @param size The size (in bytes) of this cache block.
+ */
+ TempCacheBlk(unsigned size) : CacheBlk()
+ {
+ data = new uint8_t[size];
+ }
+ TempCacheBlk(const TempCacheBlk&) = delete;
+ TempCacheBlk& operator=(const TempCacheBlk&) = delete;
+ ~TempCacheBlk() { delete [] data; };
+
+ /**
+ * Invalidate the block and clear all state.
+ */
+ void invalidate() override {
+ CacheBlk::invalidate();
+
+ _addr = MaxAddr;
+ }
+
+ void insert(const Addr addr, const bool is_secure,
+ const int src_master_ID=0, const uint32_t task_ID=0) override
+ {
+ // Set block address
+ _addr = addr;
+
+ // Set secure state
+ if (is_secure) {
+ status = BlkSecure;
+ } else {
+ status = 0;
+ }
+ }
+
+ /**
+ * Get block's address.
+ *
+ * @return addr Address value.
+ */
+ Addr getAddr() const
+ {
+ return _addr;
+ }
+};
+
/**
* Simple class to provide virtual print() method on cache blocks
* without allocating a vtable pointer for every single cache block.
const std::string &prefix = "") const;
};
-/**
- * Base class for cache block visitor, operating on the cache block
- * base class (later subclassed for the various tag classes). This
- * visitor class is used as part of the forEachBlk interface in the
- * tag classes.
- */
-class CacheBlkVisitor
-{
- public:
-
- CacheBlkVisitor() {}
- virtual ~CacheBlkVisitor() {}
-
- virtual bool operator()(CacheBlk &blk) = 0;
-};
-
#endif //__MEM_CACHE_BLK_HH__