mem-cache: Remove a std::move clang says is unnecessary.
[gem5.git] / src / mem / cache / cache_blk.hh
1 /*
2 * Copyright (c) 2012-2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Andreas Sandberg
42 */
43
44 /** @file
45 * Definitions of a simple cache block class.
46 */
47
48 #ifndef __MEM_CACHE_CACHE_BLK_HH__
49 #define __MEM_CACHE_CACHE_BLK_HH__
50
51 #include <cassert>
52 #include <cstdint>
53 #include <iosfwd>
54 #include <list>
55 #include <string>
56
57 #include "base/printable.hh"
58 #include "base/types.hh"
59 #include "mem/cache/replacement_policies/base.hh"
60 #include "mem/packet.hh"
61 #include "mem/request.hh"
62
/**
 * Cache block status bit assignments. A block's state is the bitwise OR
 * of these flags (see CacheBlk::status).
 */
enum CacheBlkStatusBits : unsigned {
    /** valid, readable */
    BlkValid = 0x01,
    /** write permission */
    BlkWritable = 0x02,
    /** read permission (yes, block can be valid but not readable) */
    BlkReadable = 0x04,
    /** dirty (modified) */
    BlkDirty = 0x08,
    // NOTE(review): 0x10 is deliberately left unassigned here — presumably
    // a bit that was removed historically; confirm before reusing the value.
    /** block was a hardware prefetch yet unaccessed */
    BlkHWPrefetched = 0x20,
    /** block holds data from the secure memory space */
    BlkSecure = 0x40,
    /** block holds compressed data */
    BlkCompressed = 0x80
};
82
/**
 * A Basic Cache block.
 * Contains the tag, status, and a pointer to data. Also tracks the set of
 * outstanding load-locked (LL) reservations on the block for LL/SC support.
 */
class CacheBlk : public ReplaceableEntry
{
  public:
    /** Task Id associated with this block */
    uint32_t task_id;

    /** Data block tag value. */
    Addr tag;
    /**
     * Contains a copy of the data in this block for easy access. This is used
     * for efficient execution when the data could be actually stored in
     * another format (COW, compressed, sub-blocked, etc). In all cases the
     * data stored here should be kept consistent with the actual data
     * referenced by this block.
     */
    uint8_t *data;

    /** block state: OR of CacheBlkStatusBits */
    typedef unsigned State;

    /** The current status of this block. @sa CacheBlkStatusBits */
    State status;

    /**
     * Which curTick() will this block be accessible. Its value is only
     * meaningful if the block is valid.
     */
    Tick whenReady;

    /** Number of references to this block since it was brought in. */
    unsigned refCount;

    /** holds the source requestor ID for this block. */
    int srcMasterId;

    /**
     * Tick on which the block was inserted in the cache. Its value is only
     * meaningful if the block is valid.
     */
    Tick tickInserted;

  protected:
    /**
     * Represents that the indicated thread context has a "lock" on
     * the block, in the LL/SC sense.
     */
    class Lock {
      public:
        ContextID contextId; // locking context
        Addr lowAddr; // low address of lock range
        Addr highAddr; // high address of lock range

        // Check for matching execution context, and an address that
        // is within the lock's range (inclusive on both ends).
        bool matches(const RequestPtr &req) const
        {
            Addr req_low = req->getPaddr();
            Addr req_high = req_low + req->getSize() -1;
            return (contextId == req->contextId()) &&
                (req_low >= lowAddr) && (req_high <= highAddr);
        }

        // Check if a request overlaps the lock's range at all, which is
        // enough to invalidate the lock.
        bool intersects(const RequestPtr &req) const
        {
            Addr req_low = req->getPaddr();
            Addr req_high = req_low + req->getSize() - 1;

            return (req_low <= highAddr) && (req_high >= lowAddr);
        }

        // Build a lock covering exactly the byte range of the request.
        Lock(const RequestPtr &req)
            : contextId(req->contextId()),
              lowAddr(req->getPaddr()),
              highAddr(lowAddr + req->getSize() - 1)
        {
        }
    };

    /** List of thread contexts that have performed a load-locked (LL)
     * on the block since the last store. */
    std::list<Lock> lockList;

  public:
    // Starts out invalid; data storage is supplied externally (e.g. by
    // the tag store or a TempCacheBlk subclass).
    CacheBlk() : data(nullptr), tickInserted(0)
    {
        invalidate();
    }

    CacheBlk(const CacheBlk&) = delete;
    CacheBlk& operator=(const CacheBlk&) = delete;
    virtual ~CacheBlk() {};

    /**
     * Checks the write permissions of this block.
     * @return True if the block is writable.
     */
    bool isWritable() const
    {
        const State needed_bits = BlkWritable | BlkValid;
        return (status & needed_bits) == needed_bits;
    }

    /**
     * Checks the read permissions of this block. Note that a block
     * can be valid but not readable if there is an outstanding write
     * upgrade miss.
     * @return True if the block is readable.
     */
    bool isReadable() const
    {
        const State needed_bits = BlkReadable | BlkValid;
        return (status & needed_bits) == needed_bits;
    }

    /**
     * Checks that a block is valid.
     * @return True if the block is valid.
     */
    bool isValid() const
    {
        return (status & BlkValid) != 0;
    }

    /**
     * Invalidate the block and clear all state, including any
     * outstanding load-locked reservations.
     */
    virtual void invalidate()
    {
        tag = MaxAddr;
        task_id = ContextSwitchTaskId::Unknown;
        status = 0;
        whenReady = MaxTick;
        refCount = 0;
        srcMasterId = Request::invldMasterId;
        lockList.clear();
    }

    /**
     * Check to see if a block has been written.
     * @return True if the block is dirty.
     */
    bool isDirty() const
    {
        return (status & BlkDirty) != 0;
    }

    /**
     * Check if this block was the result of a hardware prefetch, yet to
     * be touched.
     * @return True if the block was a hardware prefetch, unaccessed.
     */
    bool wasPrefetched() const
    {
        return (status & BlkHWPrefetched) != 0;
    }

    /**
     * Check if this block holds data from the secure memory space.
     * @return True if the block holds data from the secure memory space.
     */
    bool isSecure() const
    {
        return (status & BlkSecure) != 0;
    }

    /**
     * Set valid bit. The block must not already be valid.
     */
    virtual void setValid()
    {
        assert(!isValid());
        status |= BlkValid;
    }

    /**
     * Set secure bit.
     */
    virtual void setSecure()
    {
        status |= BlkSecure;
    }

    /**
     * Get tick at which block's data will be available for access.
     *
     * @return Data ready tick.
     */
    Tick getWhenReady() const
    {
        // MaxTick is the "never set" sentinel written by invalidate().
        assert(whenReady != MaxTick);
        return whenReady;
    }

    /**
     * Set tick at which block's data will be available for access. The new
     * tick must be chronologically sequential with respect to previous
     * accesses.
     *
     * @param tick New data ready tick.
     */
    void setWhenReady(const Tick tick)
    {
        assert(tick >= tickInserted);
        whenReady = tick;
    }

    /**
     * Set member variables when a block insertion occurs. Resets reference
     * count to 1 (the insertion counts as a reference), and touch block if
     * it hadn't been touched previously. Sets the insertion tick to the
     * current tick. Marks the block valid.
     *
     * @param tag Block address tag.
     * @param is_secure Whether the block is in secure space or not.
     * @param src_master_ID The source requestor ID.
     * @param task_ID The new task ID.
     */
    virtual void insert(const Addr tag, const bool is_secure,
                        const int src_master_ID, const uint32_t task_ID);

    /**
     * Track the fact that a load-locked (LL) was issued to the
     * block. Invalidate any previous LL to the same address from
     * an overlapping range before recording the new reservation.
     */
    void trackLoadLocked(PacketPtr pkt)
    {
        assert(pkt->isLLSC());
        auto l = lockList.begin();
        while (l != lockList.end()) {
            // erase() returns the iterator to the next element, so the
            // loop advances exactly one lock per iteration either way
            if (l->intersects(pkt->req))
                l = lockList.erase(l);
            else
                ++l;
        }

        lockList.emplace_front(pkt->req);
    }

    /**
     * Clear any load locks that intersect the request and are from
     * a different context.
     */
    void clearLoadLocks(const RequestPtr &req)
    {
        auto l = lockList.begin();
        while (l != lockList.end()) {
            if (l->intersects(req) && l->contextId != req->contextId()) {
                l = lockList.erase(l);
            } else {
                ++l;
            }
        }
    }

    /**
     * Pretty-print tag, set and way, and interpret state bits to readable form
     * including mapping to a MOESI state.
     *
     * @return string with basic state information
     */
    virtual std::string print() const
    {
        /**
         *      state       M   O   E   S   I
         *      writable    1   0   1   0   0
         *      dirty       1   1   0   0   0
         *      valid       1   1   1   1   0
         *
         *      state   writable    dirty   valid
         *      M       1           1       1
         *      O       0           1       1
         *      E       1           0       1
         *      S       0           0       1
         *      I       0           0       0
         *
         * Note that only one cache ever has a block in Modified or
         * Owned state, i.e., only one cache owns the block, or
         * equivalently has the BlkDirty bit set. However, multiple
         * caches on the same path to memory can have a block in the
         * Exclusive state (despite the name). Exclusive means this
         * cache has the only copy at this level of the hierarchy,
         * i.e., there may be copies in caches above this cache (in
         * various states), but there are no peers that have copies on
         * this branch of the hierarchy, and no caches at or above
         * this level on any other branch have copies either.
         **/
        // Pack writable/dirty/valid into a 3-bit value and decode it
        // into the MOESI letter per the table above.
        unsigned state = isWritable() << 2 | isDirty() << 1 | isValid();
        char s = '?';
        switch (state) {
          case 0b111: s = 'M'; break;
          case 0b011: s = 'O'; break;
          case 0b101: s = 'E'; break;
          case 0b001: s = 'S'; break;
          case 0b000: s = 'I'; break;
          default:    s = 'T'; break; // @TODO add other types
        }
        return csprintf("state: %x (%c) valid: %d writable: %d readable: %d "
                        "dirty: %d | tag: %#x set: %#x way: %#x", status, s,
                        isValid(), isWritable(), isReadable(), isDirty(), tag,
                        getSet(), getWay());
    }

    /**
     * Handle interaction of load-locked operations and stores.
     * @return True if write should proceed, false otherwise. Returns
     * false only in the case of a failed store conditional.
     */
    bool checkWrite(PacketPtr pkt)
    {
        assert(pkt->isWrite());

        // common case: no reservations outstanding and not an SC
        if (!pkt->isLLSC() && lockList.empty())
            return true;

        const RequestPtr &req = pkt->req;

        if (pkt->isLLSC()) {
            // it's a store conditional... have to check for matching
            // load locked.
            bool success = false;

            auto l = lockList.begin();
            while (!success && l != lockList.end()) {
                if (l->matches(pkt->req)) {
                    // it's a store conditional, and as far as the
                    // memory system can tell, the requesting
                    // context's lock is still valid.
                    success = true;
                    lockList.erase(l);
                } else {
                    ++l;
                }
            }

            // report SC success/failure back to the requesting context
            req->setExtraData(success ? 1 : 0);
            // clear any intersected locks from other contexts (our LL
            // should already have cleared them)
            clearLoadLocks(req);
            return success;
        } else {
            // a normal write, if there is any lock not from this
            // context we clear the list, thus for a private cache we
            // never clear locks on normal writes
            clearLoadLocks(req);
            return true;
        }
    }
};
437
438 /**
439 * Special instance of CacheBlk for use with tempBlk that deals with its
440 * block address regeneration.
441 * @sa Cache
442 */
443 class TempCacheBlk final : public CacheBlk
444 {
445 private:
446 /**
447 * Copy of the block's address, used to regenerate tempBlock's address.
448 */
449 Addr _addr;
450
451 public:
452 /**
453 * Creates a temporary cache block, with its own storage.
454 * @param size The size (in bytes) of this cache block.
455 */
456 TempCacheBlk(unsigned size) : CacheBlk()
457 {
458 data = new uint8_t[size];
459 }
460 TempCacheBlk(const TempCacheBlk&) = delete;
461 TempCacheBlk& operator=(const TempCacheBlk&) = delete;
462 ~TempCacheBlk() { delete [] data; };
463
464 /**
465 * Invalidate the block and clear all state.
466 */
467 void invalidate() override {
468 CacheBlk::invalidate();
469
470 _addr = MaxAddr;
471 }
472
473 void insert(const Addr addr, const bool is_secure,
474 const int src_master_ID=0, const uint32_t task_ID=0) override
475 {
476 // Make sure that the block has been properly invalidated
477 assert(status == 0);
478
479 // Set block address
480 _addr = addr;
481
482 // Set secure state
483 if (is_secure) {
484 setSecure();
485 }
486
487 // Validate block
488 setValid();
489 }
490
491 /**
492 * Get block's address.
493 *
494 * @return addr Address value.
495 */
496 Addr getAddr() const
497 {
498 return _addr;
499 }
500 };
501
502 /**
503 * Simple class to provide virtual print() method on cache blocks
504 * without allocating a vtable pointer for every single cache block.
505 * Just wrap the CacheBlk object in an instance of this before passing
506 * to a function that requires a Printable object.
507 */
508 class CacheBlkPrintWrapper : public Printable
509 {
510 CacheBlk *blk;
511 public:
512 CacheBlkPrintWrapper(CacheBlk *_blk) : blk(_blk) {}
513 virtual ~CacheBlkPrintWrapper() {}
514 void print(std::ostream &o, int verbosity = 0,
515 const std::string &prefix = "") const;
516 };
517
518 #endif //__MEM_CACHE_CACHE_BLK_HH__