/*
 * Copyright (c) 2012-2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definitions of a simple cache block class.
 */
#ifndef __MEM_CACHE_BLK_HH__
#define __MEM_CACHE_BLK_HH__

#include <cassert>
#include <list>
#include <string>

#include "base/printable.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
/**
 * Cache block status bit assignments
 */
enum CacheBlkStatusBits : unsigned {
    /** valid, readable */
    BlkValid =          0x01,
    /** write permission */
    BlkWritable =       0x02,
    /** read permission (yes, block can be valid but not readable) */
    BlkReadable =       0x04,
    /** dirty (modified) */
    BlkDirty =          0x08,
    // NOTE(review): 0x10 is deliberately left unassigned here; only the
    // 0x20 and 0x40 values are grounded in the visible text — confirm the
    // lower bit assignments against the upstream header.
    /** block was a hardware prefetch yet unaccessed */
    BlkHWPrefetched =   0x20,
    /** block holds data from the secure memory space */
    BlkSecure =         0x40,
};
76 * A Basic Cache block.
77 * Contains the tag, status, and a pointer to data.
82 /** Task Id associated with this block */
85 /** Data block tag value. */
88 * Contains a copy of the data in this block for easy access. This is used
89 * for efficient execution when the data could be actually stored in
90 * another format (COW, compressed, sub-blocked, etc). In all cases the
91 * data stored here should be kept consistant with the actual data
92 * referenced by this block.
96 /** block state: OR of CacheBlkStatusBit */
97 typedef unsigned State;
99 /** The current status of this block. @sa CacheBlockStatusBits */
102 /** Which curTick() will this block be accessible */
106 * The set and way this block belongs to.
107 * @todo Move this into subclasses when we fix CacheTags to use them.
112 * Whether this block has been touched since simulation started.
113 * Used to calculate number of used tags.
117 /** Number of references to this block since it was brought in. */
120 /** holds the source requestor ID for this block. */
123 /** Tick on which the block was inserted in the cache. */
127 * Replacement policy data. As of now it is only an update timestamp.
128 * Tick on which the block was last touched.
133 * Re-Reference Interval Prediction Value. Used with RRIP repl policy.
139 * Represents that the indicated thread context has a "lock" on
140 * the block, in the LL/SC sense.
144 ContextID contextId; // locking context
145 Addr lowAddr; // low address of lock range
146 Addr highAddr; // high address of lock range
148 // check for matching execution context, and an address that
149 // is within the lock
150 bool matches(const RequestPtr req) const
152 Addr req_low = req->getPaddr();
153 Addr req_high = req_low + req->getSize() -1;
154 return (contextId == req->contextId()) &&
155 (req_low >= lowAddr) && (req_high <= highAddr);
158 // check if a request is intersecting and thus invalidating the lock
159 bool intersects(const RequestPtr req) const
161 Addr req_low = req->getPaddr();
162 Addr req_high = req_low + req->getSize() - 1;
164 return (req_low <= highAddr) && (req_high >= lowAddr);
167 Lock(const RequestPtr req)
168 : contextId(req->contextId()),
169 lowAddr(req->getPaddr()),
170 highAddr(lowAddr + req->getSize() - 1)
175 /** List of thread contexts that have performed a load-locked (LL)
176 * on the block since the last store. */
177 std::list<Lock> lockList;
186 CacheBlk(const CacheBlk&) = delete;
187 CacheBlk& operator=(const CacheBlk&) = delete;
188 virtual ~CacheBlk() {};
191 * Checks the write permissions of this block.
192 * @return True if the block is writable.
194 bool isWritable() const
196 const State needed_bits = BlkWritable | BlkValid;
197 return (status & needed_bits) == needed_bits;
201 * Checks the read permissions of this block. Note that a block
202 * can be valid but not readable if there is an outstanding write
204 * @return True if the block is readable.
206 bool isReadable() const
208 const State needed_bits = BlkReadable | BlkValid;
209 return (status & needed_bits) == needed_bits;
213 * Checks that a block is valid.
214 * @return True if the block is valid.
218 return (status & BlkValid) != 0;
222 * Invalidate the block and clear all state.
224 virtual void invalidate()
227 task_id = ContextSwitchTaskId::Unknown;
232 srcMasterId = Request::invldMasterId;
233 tickInserted = MaxTick;
238 * Check to see if a block has been written.
239 * @return True if the block is dirty.
243 return (status & BlkDirty) != 0;
247 * Check if this block was the result of a hardware prefetch, yet to
249 * @return True if the block was a hardware prefetch, unaccesed.
251 bool wasPrefetched() const
253 return (status & BlkHWPrefetched) != 0;
257 * Check if this block holds data from the secure memory space.
258 * @return True if the block holds data from the secure memory space.
260 bool isSecure() const
262 return (status & BlkSecure) != 0;
266 * Track the fact that a local locked was issued to the
267 * block. Invalidate any previous LL to the same address.
269 void trackLoadLocked(PacketPtr pkt)
271 assert(pkt->isLLSC());
272 auto l = lockList.begin();
273 while (l != lockList.end()) {
274 if (l->intersects(pkt->req))
275 l = lockList.erase(l);
280 lockList.emplace_front(pkt->req);
284 * Clear the any load lock that intersect the request, and is from
285 * a different context.
287 void clearLoadLocks(RequestPtr req)
289 auto l = lockList.begin();
290 while (l != lockList.end()) {
291 if (l->intersects(req) && l->contextId != req->contextId()) {
292 l = lockList.erase(l);
300 * Pretty-print a tag, and interpret state bits to readable form
301 * including mapping to a MOESI state.
303 * @return string with basic state information
305 std::string print() const
313 * state writable dirty valid
320 * Note that only one cache ever has a block in Modified or
321 * Owned state, i.e., only one cache owns the block, or
322 * equivalently has the BlkDirty bit set. However, multiple
323 * caches on the same path to memory can have a block in the
324 * Exclusive state (despite the name). Exclusive means this
325 * cache has the only copy at this level of the hierarchy,
326 * i.e., there may be copies in caches above this cache (in
327 * various states), but there are no peers that have copies on
328 * this branch of the hierarchy, and no caches at or above
329 * this level on any other branch have copies either.
331 unsigned state = isWritable() << 2 | isDirty() << 1 | isValid();
334 case 0b111: s = 'M'; break;
335 case 0b011: s = 'O'; break;
336 case 0b101: s = 'E'; break;
337 case 0b001: s = 'S'; break;
338 case 0b000: s = 'I'; break;
339 default: s = 'T'; break; // @TODO add other types
341 return csprintf("state: %x (%c) valid: %d writable: %d readable: %d "
342 "dirty: %d tag: %x", status, s, isValid(),
343 isWritable(), isReadable(), isDirty(), tag);
347 * Handle interaction of load-locked operations and stores.
348 * @return True if write should proceed, false otherwise. Returns
349 * false only in the case of a failed store conditional.
351 bool checkWrite(PacketPtr pkt)
353 assert(pkt->isWrite());
356 if (!pkt->isLLSC() && lockList.empty())
359 RequestPtr req = pkt->req;
362 // it's a store conditional... have to check for matching
364 bool success = false;
366 auto l = lockList.begin();
367 while (!success && l != lockList.end()) {
368 if (l->matches(pkt->req)) {
369 // it's a store conditional, and as far as the
370 // memory system can tell, the requesting
371 // context's lock is still valid.
379 req->setExtraData(success ? 1 : 0);
380 // clear any intersected locks from other contexts (our LL
381 // should already have cleared them)
385 // a normal write, if there is any lock not from this
386 // context we clear the list, thus for a private cache we
387 // never clear locks on normal writes
395 * Simple class to provide virtual print() method on cache blocks
396 * without allocating a vtable pointer for every single cache block.
397 * Just wrap the CacheBlk object in an instance of this before passing
398 * to a function that requires a Printable object.
400 class CacheBlkPrintWrapper : public Printable
404 CacheBlkPrintWrapper(CacheBlk *_blk) : blk(_blk) {}
405 virtual ~CacheBlkPrintWrapper() {}
406 void print(std::ostream &o, int verbosity = 0,
407 const std::string &prefix = "") const;
411 * Base class for cache block visitor, operating on the cache block
412 * base class (later subclassed for the various tag classes). This
413 * visitor class is used as part of the forEachBlk interface in the
416 class CacheBlkVisitor
421 virtual ~CacheBlkVisitor() {}
423 virtual bool operator()(CacheBlk &blk) = 0;
426 #endif //__MEM_CACHE_BLK_HH__