/*
 * Copyright (c) 2012-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @file
 * Definitions of a simple cache block class.
 */

#ifndef __MEM_CACHE_CACHE_BLK_HH__
#define __MEM_CACHE_CACHE_BLK_HH__

#include <cassert>
#include <cstdint>
#include <iosfwd>
#include <list>
#include <string>

#include "base/printable.hh"
#include "base/types.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "mem/packet.hh"
#include "mem/request.hh"

/**
 * Cache block status bit assignments
 */
enum CacheBlkStatusBits : unsigned {
    /** valid, readable */
    BlkValid = 0x01,
    /** write permission */
    BlkWritable = 0x02,
    /** read permission (yes, block can be valid but not readable) */
    BlkReadable = 0x04,
    /** dirty (modified) */
    BlkDirty = 0x08,
    /** block was a hardware prefetch yet unaccessed */
    BlkHWPrefetched = 0x20,
    /** block holds data from the secure memory space */
    BlkSecure = 0x40,
    /** block holds compressed data */
    BlkCompressed = 0x80
};
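
/*
 * Illustrative sketch (not part of the original file): the bits above are
 * OR-ed into a block's status word and tested with simple masks. For
 * example, a valid block that has been written to locally could look like:
 *
 *     unsigned status = BlkValid | BlkReadable | BlkWritable | BlkDirty;
 *     bool dirty = (status & BlkDirty) != 0;    // true
 *
 * The accessors on CacheBlk below wrap exactly these tests.
 */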

/**
 * A Basic Cache block.
 * Contains the tag, status, and a pointer to data.
 */
class CacheBlk : public ReplaceableEntry
{
  public:
    /** Task Id associated with this block */
    uint32_t task_id;

    /** Data block tag value. */
    Addr tag;
    /**
     * Contains a copy of the data in this block for easy access. This is
     * used for efficient execution when the data could actually be stored
     * in another format (COW, compressed, sub-blocked, etc). In all cases
     * the data stored here should be kept consistent with the actual data
     * referenced by this block.
     */
    uint8_t *data;

    /** block state: OR of CacheBlkStatusBits */
    typedef unsigned State;

    /** The current status of this block. @sa CacheBlkStatusBits */
    State status;

    /**
     * The tick at which this block becomes accessible. Its value is
     * only meaningful if the block is valid.
     */
    Tick whenReady;

    /** Number of references to this block since it was brought in. */
    unsigned refCount;

    /** Holds the source requestor ID for this block. */
    int srcMasterId;

    /**
     * Tick on which the block was inserted in the cache. Its value is only
     * meaningful if the block is valid.
     */
    Tick tickInserted;

  protected:
    /**
     * Represents that the indicated thread context has a "lock" on
     * the block, in the LL/SC sense.
     */
    class Lock {
      public:
        ContextID contextId;    // locking context
        Addr lowAddr;           // low address of lock range
        Addr highAddr;          // high address of lock range

        // check for matching execution context, and an address that
        // is within the lock
        bool matches(const RequestPtr &req) const
        {
            Addr req_low = req->getPaddr();
            Addr req_high = req_low + req->getSize() - 1;
            return (contextId == req->contextId()) &&
                   (req_low >= lowAddr) && (req_high <= highAddr);
        }

        // check if a request is intersecting and thus invalidating the lock
        bool intersects(const RequestPtr &req) const
        {
            Addr req_low = req->getPaddr();
            Addr req_high = req_low + req->getSize() - 1;

            return (req_low <= highAddr) && (req_high >= lowAddr);
        }

        Lock(const RequestPtr &req)
            : contextId(req->contextId()),
              lowAddr(req->getPaddr()),
              highAddr(lowAddr + req->getSize() - 1)
        {
        }
    };

    /** List of thread contexts that have performed a load-locked (LL)
     * on the block since the last store. */
    std::list<Lock> lockList;

  public:
    CacheBlk() : data(nullptr), tickInserted(0)
    {
        invalidate();
    }

    CacheBlk(const CacheBlk&) = delete;
    CacheBlk& operator=(const CacheBlk&) = delete;
    virtual ~CacheBlk() {}

    /**
     * Checks the write permissions of this block.
     * @return True if the block is writable.
     */
    bool isWritable() const
    {
        const State needed_bits = BlkWritable | BlkValid;
        return (status & needed_bits) == needed_bits;
    }

    /**
     * Checks the read permissions of this block. Note that a block
     * can be valid but not readable if there is an outstanding write
     * upgrade miss.
     * @return True if the block is readable.
     */
    bool isReadable() const
    {
        const State needed_bits = BlkReadable | BlkValid;
        return (status & needed_bits) == needed_bits;
    }

    /**
     * Checks that a block is valid.
     * @return True if the block is valid.
     */
    bool isValid() const
    {
        return (status & BlkValid) != 0;
    }

    /**
     * Invalidate the block and clear all state.
     */
    virtual void invalidate()
    {
        tag = MaxAddr;
        task_id = ContextSwitchTaskId::Unknown;
        status = 0;
        whenReady = MaxTick;
        refCount = 0;
        srcMasterId = Request::invldMasterId;
        lockList.clear();
    }

    /**
     * Check to see if a block has been written.
     * @return True if the block is dirty.
     */
    bool isDirty() const
    {
        return (status & BlkDirty) != 0;
    }

    /**
     * Check if this block was the result of a hardware prefetch, yet to
     * be touched.
     * @return True if the block was a hardware prefetch, unaccessed.
     */
    bool wasPrefetched() const
    {
        return (status & BlkHWPrefetched) != 0;
    }

    /**
     * Check if this block holds data from the secure memory space.
     * @return True if the block holds data from the secure memory space.
     */
    bool isSecure() const
    {
        return (status & BlkSecure) != 0;
    }

    /**
     * Set valid bit.
     */
    virtual void setValid()
    {
        assert(!isValid());
        status |= BlkValid;
    }

    /**
     * Set secure bit.
     */
    virtual void setSecure()
    {
        status |= BlkSecure;
    }

    /**
     * Get tick at which block's data will be available for access.
     *
     * @return Data ready tick.
     */
    Tick getWhenReady() const
    {
        assert(whenReady != MaxTick);
        return whenReady;
    }

    /**
     * Set tick at which block's data will be available for access. The new
     * tick must be chronologically sequential with respect to previous
     * accesses.
     *
     * @param tick New data ready tick.
     */
    void setWhenReady(const Tick tick)
    {
        assert(tick >= tickInserted);
        whenReady = tick;
    }

    /**
     * Set member variables when a block insertion occurs. Resets the
     * reference count to 1 (the insertion counts as a reference), touches
     * the block if it hadn't been touched previously, sets the insertion
     * tick to the current tick, and marks the block valid.
     *
     * @param tag Block address tag.
     * @param is_secure Whether the block is in secure space or not.
     * @param src_master_ID The source requestor ID.
     * @param task_ID The new task ID.
     */
    virtual void insert(const Addr tag, const bool is_secure,
                        const int src_master_ID, const uint32_t task_ID);
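
    /*
     * Illustrative sketch (not part of the original file): on a fill, a
     * tags class typically hands a victim block to insert() roughly like
     * this; extractTag(), pkt, and fill_latency are assumed names used
     * only for the example.
     *
     *     CacheBlk *victim = ...;  // chosen by the replacement policy
     *     victim->insert(extractTag(pkt->getAddr()), pkt->isSecure(),
     *                    pkt->req->masterId(), pkt->req->taskId());
     *     victim->setWhenReady(curTick() + fill_latency);
     */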

    /**
     * Track the fact that a load-locked was issued to the
     * block. Invalidate any previous LL to the same address.
     */
    void trackLoadLocked(PacketPtr pkt)
    {
        assert(pkt->isLLSC());
        auto l = lockList.begin();
        while (l != lockList.end()) {
            if (l->intersects(pkt->req))
                l = lockList.erase(l);
            else
                ++l;
        }

        lockList.emplace_front(pkt->req);
    }

    /**
     * Clear any load locks that intersect the request and are from
     * a different context.
     */
    void clearLoadLocks(const RequestPtr &req)
    {
        auto l = lockList.begin();
        while (l != lockList.end()) {
            if (l->intersects(req) && l->contextId != req->contextId()) {
                l = lockList.erase(l);
            } else {
                ++l;
            }
        }
    }

    /**
     * Pretty-print tag, set and way, and interpret state bits to readable
     * form including mapping to a MOESI state.
     *
     * @return string with basic state information
     */
    std::string
    print() const override
    {
        /**
         *  state       M   O   E   S   I
         *  writable    1   0   1   0   0
         *  dirty       1   1   0   0   0
         *  valid       1   1   1   1   0
         *
         *  state   writable    dirty   valid
         *  M       1           1       1
         *  O       0           1       1
         *  E       1           0       1
         *  S       0           0       1
         *  I       0           0       0
         *
         * Note that only one cache ever has a block in Modified or
         * Owned state, i.e., only one cache owns the block, or
         * equivalently has the BlkDirty bit set. However, multiple
         * caches on the same path to memory can have a block in the
         * Exclusive state (despite the name). Exclusive means this
         * cache has the only copy at this level of the hierarchy,
         * i.e., there may be copies in caches above this cache (in
         * various states), but there are no peers that have copies on
         * this branch of the hierarchy, and no caches at or above
         * this level on any other branch have copies either.
         **/
        unsigned state = isWritable() << 2 | isDirty() << 1 | isValid();
        char s = '?';
        switch (state) {
          case 0b111: s = 'M'; break;
          case 0b011: s = 'O'; break;
          case 0b101: s = 'E'; break;
          case 0b001: s = 'S'; break;
          case 0b000: s = 'I'; break;
          default:    s = 'T'; break; // @TODO add other types
        }
        return csprintf("state: %x (%c) valid: %d writable: %d readable: %d "
                        "dirty: %d | tag: %#x %s", status, s,
                        isValid(), isWritable(), isReadable(), isDirty(), tag,
                        ReplaceableEntry::print());
    }

    /**
     * Handle interaction of load-locked operations and stores.
     * @return True if write should proceed, false otherwise. Returns
     * false only in the case of a failed store conditional.
     */
    bool checkWrite(PacketPtr pkt)
    {
        assert(pkt->isWrite());

        // common case
        if (!pkt->isLLSC() && lockList.empty())
            return true;

        const RequestPtr &req = pkt->req;

        if (pkt->isLLSC()) {
            // it's a store conditional... have to check for matching
            // load locked.
            bool success = false;

            auto l = lockList.begin();
            while (!success && l != lockList.end()) {
                if (l->matches(pkt->req)) {
                    // it's a store conditional, and as far as the
                    // memory system can tell, the requesting
                    // context's lock is still valid.
                    success = true;
                    lockList.erase(l);
                } else {
                    ++l;
                }
            }

            req->setExtraData(success ? 1 : 0);
            // clear any intersected locks from other contexts (our LL
            // should already have cleared them)
            clearLoadLocks(req);
            return success;
        } else {
            // a normal write, if there is any lock not from this
            // context we clear the list, thus for a private cache we
            // never clear locks on normal writes
            clearLoadLocks(req);
            return true;
        }
    }
};
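
/*
 * Illustrative sketch (not part of the original file) of how a cache
 * typically drives the LL/SC helpers above when an access hits; blk, pkt,
 * and blkSize are assumed names used only for the example.
 *
 *     if (pkt->isLLSC() && pkt->isRead()) {
 *         // load-locked: remember the lock on this block
 *         blk->trackLoadLocked(pkt);
 *     } else if (pkt->isWrite() && blk->checkWrite(pkt)) {
 *         // ordinary store, or a store conditional whose lock is still
 *         // valid; a failed SC reports its outcome via req->setExtraData()
 *         pkt->writeDataToBlock(blk->data, blkSize);
 *         blk->status |= BlkDirty;
 *     }
 */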

/**
 * Special instance of CacheBlk for use with tempBlk that deals with its
 * block address regeneration.
 * @sa Cache
 */
class TempCacheBlk final : public CacheBlk
{
  private:
    /**
     * Copy of the block's address, used to regenerate tempBlock's address.
     */
    Addr _addr;

  public:
    /**
     * Creates a temporary cache block, with its own storage.
     * @param size The size (in bytes) of this cache block.
     */
    TempCacheBlk(unsigned size) : CacheBlk()
    {
        data = new uint8_t[size];
    }
    TempCacheBlk(const TempCacheBlk&) = delete;
    TempCacheBlk& operator=(const TempCacheBlk&) = delete;
    ~TempCacheBlk() { delete [] data; }

    /**
     * Invalidate the block and clear all state.
     */
    void invalidate() override {
        CacheBlk::invalidate();

        _addr = MaxAddr;
    }

    void insert(const Addr addr, const bool is_secure,
                const int src_master_ID=0, const uint32_t task_ID=0) override
    {
        // Make sure that the block has been properly invalidated
        assert(status == 0);

        // Set block address
        _addr = addr;

        // Set secure state
        if (is_secure) {
            setSecure();
        }

        // Validate block
        setValid();
    }

    /**
     * Get block's address.
     *
     * @return addr Address value.
     */
    Addr getAddr() const
    {
        return _addr;
    }
};
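
/*
 * Illustrative sketch (not part of the original file): a cache keeps a
 * single temporary block around for misses that cannot allocate a regular
 * block; blkSize and pkt are assumed names used only for the example.
 *
 *     TempCacheBlk *tempBlock = new TempCacheBlk(blkSize);
 *     tempBlock->insert(pkt->getBlockAddr(blkSize), pkt->isSecure());
 *     // ... service the access out of tempBlock->data ...
 *     tempBlock->invalidate();
 */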

/**
 * Simple class to provide virtual print() method on cache blocks
 * without allocating a vtable pointer for every single cache block.
 * Just wrap the CacheBlk object in an instance of this before passing
 * to a function that requires a Printable object.
 */
class CacheBlkPrintWrapper : public Printable
{
    CacheBlk *blk;
  public:
    CacheBlkPrintWrapper(CacheBlk *_blk) : blk(_blk) {}
    virtual ~CacheBlkPrintWrapper() {}
    void print(std::ostream &o, int verbosity = 0,
               const std::string &prefix = "") const;
};
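
/*
 * Illustrative sketch (not part of the original file): wrapping a block so
 * it can be handed to code that expects a Printable; blk is an assumed name
 * used only for the example.
 *
 *     CacheBlkPrintWrapper cbpw(blk);
 *     std::ostringstream os;
 *     cbpw.print(os);    // default verbosity, no prefix
 *     // os.str() now holds a printable description of the block
 */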

#endif //__MEM_CACHE_CACHE_BLK_HH__