ruby: Replace Time with Cycles in SequencerMessage
[gem5.git] / src / mem / cache / blk.hh
1 /*
2 * Copyright (c) 2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Andreas Sandberg
42 */
43
44 /** @file
45 * Definitions of a simple cache block class.
46 */
47
48 #ifndef __CACHE_BLK_HH__
49 #define __CACHE_BLK_HH__
50
51 #include <list>
52
53 #include "base/printable.hh"
54 #include "mem/packet.hh"
55 #include "mem/request.hh"
56 #include "sim/core.hh" // for Tick
57
/**
 * Status flags for a cache block; a block's State is the bitwise OR
 * of any combination of these.
 */
enum CacheBlkStatusBits {
    /** The block holds valid data. */
    BlkValid = 1 << 0,
    /** The block may be written. */
    BlkWritable = 1 << 1,
    /** The block may be read (a block can be valid yet unreadable,
     *  e.g. while a write/upgrade miss is outstanding). */
    BlkReadable = 1 << 2,
    /** The block has been modified (is dirty). */
    BlkDirty = 1 << 3,
    /** The block has been referenced since it was brought in. */
    BlkReferenced = 1 << 4,
    /** The block was installed by a hardware prefetch and has not
     *  yet been demand-accessed. */
    BlkHWPrefetched = 1 << 5
};
75
/**
 * A Basic Cache block.
 * Contains the tag, status, and a pointer to data.
 */
class CacheBlk
{
  public:
    /** The address space ID of this block. */
    int asid;
    /** Data block tag value. */
    Addr tag;
    /**
     * Contains a copy of the data in this block for easy access. This is used
     * for efficient execution when the data could be actually stored in
     * another format (COW, compressed, sub-blocked, etc). In all cases the
     * data stored here should be kept consistent with the actual data
     * referenced by this block.
     */
    uint8_t *data;
    /** The number of bytes stored in this block. */
    int size;

    /** Block state: bitwise OR of CacheBlkStatusBits flags. */
    typedef unsigned State;

    /** The current status of this block. @sa CacheBlkStatusBits */
    State status;

    /** The tick at which this block becomes accessible. */
    Tick whenReady;

    /**
     * The set this block belongs to.
     * @todo Move this into subclasses when we fix CacheTags to use them.
     */
    int set;

    /** Whether this block has been touched (cleared on invalidate). */
    bool isTouched;

    /** Number of references to this block since it was brought in. */
    int refCount;

    /** Holds the source requestor ID for this block. */
    int srcMasterId;

  protected:
    /**
     * Represents that the indicated thread context has a "lock" on
     * the block, in the LL/SC sense. A lock records the context that
     * issued the load-locked and the exact byte range it covered.
     */
    class Lock {
      public:
        int contextId;     // locking context
        Addr lowAddr;      // low address of lock range
        Addr highAddr;     // high address of lock range

        /**
         * Check for a matching execution context: same context ID AND
         * the request's byte range falls entirely within the locked range.
         * @param req The request to check against this lock.
         * @return True if this lock validates the given request.
         */
        bool matchesContext(Request *req)
        {
            Addr req_low = req->getPaddr();
            Addr req_high = req_low + req->getSize() -1;
            return (contextId == req->contextId()) &&
                   (req_low >= lowAddr) && (req_high <= highAddr);
        }

        /**
         * Check whether the request's byte range intersects the locked
         * range at all (used to decide which locks a store kills).
         * @param req The request to check against this lock.
         * @return True if the ranges overlap by at least one byte.
         */
        bool overlapping(Request *req)
        {
            Addr req_low = req->getPaddr();
            Addr req_high = req_low + req->getSize() - 1;

            return (req_low <= highAddr) && (req_high >= lowAddr);
        }

        /** Construct a lock covering exactly the request's byte range. */
        Lock(Request *req)
            : contextId(req->contextId()),
              lowAddr(req->getPaddr()),
              highAddr(lowAddr + req->getSize() - 1)
        {
        }
    };

    /** List of thread contexts that have performed a load-locked (LL)
     * on the block since the last store. */
    std::list<Lock> lockList;

  public:

    /** Construct an empty, invalid block (status 0, no data pointer). */
    CacheBlk()
        : asid(-1), tag(0), data(0) ,size(0), status(0), whenReady(0),
          set(-1), isTouched(false), refCount(0),
          srcMasterId(Request::invldMasterId)
    {}

    /**
     * Copy the state of the given block into this one.
     *
     * NOTE(review): isTouched, srcMasterId, and lockList are NOT copied
     * here -- presumably deliberate (LL/SC locks are per-location and
     * should not migrate with copied state), but confirm before relying
     * on this in new callers.
     *
     * @param rhs The block to copy.
     * @return a const reference to this block.
     */
    const CacheBlk& operator=(const CacheBlk& rhs)
    {
        asid = rhs.asid;
        tag = rhs.tag;
        data = rhs.data;
        size = rhs.size;
        status = rhs.status;
        whenReady = rhs.whenReady;
        set = rhs.set;
        refCount = rhs.refCount;
        return *this;
    }

    /**
     * Checks the write permissions of this block. A block is writable
     * only if it is both valid and holds write permission.
     * @return True if the block is writable.
     */
    bool isWritable() const
    {
        const State needed_bits = BlkWritable | BlkValid;
        return (status & needed_bits) == needed_bits;
    }

    /**
     * Checks the read permissions of this block. Note that a block
     * can be valid but not readable if there is an outstanding write
     * upgrade miss.
     * @return True if the block is readable.
     */
    bool isReadable() const
    {
        const State needed_bits = BlkReadable | BlkValid;
        return (status & needed_bits) == needed_bits;
    }

    /**
     * Checks that a block is valid.
     * @return True if the block is valid.
     */
    bool isValid() const
    {
        return (status & BlkValid) != 0;
    }

    /**
     * Invalidate the block and clear all state: status bits, the
     * touched flag, and any outstanding LL/SC locks. Does not touch
     * the data pointer or tag.
     */
    void invalidate()
    {
        status = 0;
        isTouched = false;
        clearLoadLocks();
    }

    /**
     * Check to see if a block has been written.
     * @return True if the block is dirty.
     */
    bool isDirty() const
    {
        return (status & BlkDirty) != 0;
    }

    /**
     * Check if this block has been referenced.
     * @return True if the block has been referenced.
     */
    bool isReferenced() const
    {
        return (status & BlkReferenced) != 0;
    }

    /**
     * Check if this block was the result of a hardware prefetch, yet to
     * be touched.
     * @return True if the block was a hardware prefetch, unaccessed.
     */
    bool wasPrefetched() const
    {
        return (status & BlkHWPrefetched) != 0;
    }

    /**
     * Track the fact that a load-locked was issued to the block. If
     * multiple LLs get issued from the same context we could have
     * redundant records on the list, but that's OK, as they'll all
     * get blown away at the next store.
     * @param pkt The load-locked packet (must satisfy isLLSC()).
     */
    void trackLoadLocked(PacketPtr pkt)
    {
        assert(pkt->isLLSC());
        lockList.push_front(Lock(pkt->req));
    }

    /**
     * Clear the list of valid load locks. Should be called whenever
     * block is written to or invalidated.
     * @param req If non-NULL, only locks overlapping this request's
     *            byte range are removed; if NULL, all locks are cleared.
     */
    void clearLoadLocks(Request *req = NULL)
    {
        if (!req) {
            // No request, invalidate all locks to this line
            lockList.clear();
        } else {
            // Only invalidate locks that overlap with this request
            std::list<Lock>::iterator lock_itr = lockList.begin();
            while (lock_itr != lockList.end()) {
                if (lock_itr->overlapping(req)) {
                    lock_itr = lockList.erase(lock_itr);
                } else {
                    ++lock_itr;
                }
            }
        }
    }

    /**
     * Handle interaction of load-locked operations and stores. Any
     * store (conditional or not) clears the lock list; a store
     * conditional additionally succeeds only if a matching lock for
     * its context was still present, with the outcome reported back
     * through the request's extra data (1 = success, 0 = failure).
     * @param pkt The write packet to check.
     * @return True if write should proceed, false otherwise. Returns
     * false only in the case of a failed store conditional.
     */
    bool checkWrite(PacketPtr pkt)
    {
        Request *req = pkt->req;
        if (pkt->isLLSC()) {
            // it's a store conditional... have to check for matching
            // load locked.
            bool success = false;

            for (std::list<Lock>::iterator i = lockList.begin();
                 i != lockList.end(); ++i)
            {
                if (i->matchesContext(req)) {
                    // it's a store conditional, and as far as the memory
                    // system can tell, the requesting context's lock is
                    // still valid.
                    success = true;
                    break;
                }
            }

            req->setExtraData(success ? 1 : 0);
            clearLoadLocks(req);
            return success;
        } else {
            // for *all* stores (conditional or otherwise) we have to
            // clear the list of load-locks as they're all invalid now.
            clearLoadLocks(req);
            return true;
        }
    }
};
327
/**
 * Simple class to provide virtual print() method on cache blocks
 * without allocating a vtable pointer for every single cache block.
 * Just wrap the CacheBlk object in an instance of this before passing
 * to a function that requires a Printable object.
 */
class CacheBlkPrintWrapper : public Printable
{
    /** Non-owning pointer to the wrapped block; must outlive this wrapper. */
    CacheBlk *blk;
  public:
    CacheBlkPrintWrapper(CacheBlk *_blk) : blk(_blk) {}
    virtual ~CacheBlkPrintWrapper() {}

    /**
     * Print the wrapped block's state (implementation lives outside
     * this header).
     * @param o Stream to print to.
     * @param verbosity Verbosity level; 0 is the least verbose.
     * @param prefix String prepended to the output.
     */
    void print(std::ostream &o, int verbosity = 0,
               const std::string &prefix = "") const;
};
343
/**
 * Wrap a method and present it as a cache block visitor.
 *
 * For example the forEachBlk method in the tag arrays expects a
 * callable object/function as their parameter. This class binds an
 * object and one of its member functions together and exposes the
 * pair as a callable that follows the cache block visitor protocol
 * (return true to keep visiting, false to stop).
 */
template <typename T, typename BlkType>
class CacheBlkVisitorWrapper
{
  public:
    /** Signature of the wrapped member function. */
    typedef bool (T::*visitorPtr)(BlkType &blk);

    /**
     * @param _obj Instance the member function is invoked on.
     * @param _visitor Member function to invoke per block.
     */
    CacheBlkVisitorWrapper(T &_obj, visitorPtr _visitor)
        : wrappedObj(_obj), method(_visitor)
    {
    }

    /** Forward the block to the wrapped method and relay its verdict. */
    bool operator()(BlkType &blk)
    {
        return (wrappedObj.*method)(blk);
    }

  private:
    T &wrappedObj;       //!< Object the visitor method belongs to.
    visitorPtr method;   //!< The bound member function.
};
369
/**
 * Cache block visitor that determines if there are dirty blocks in a
 * cache.
 *
 * Use with the forEachBlk method in the tag array to determine if the
 * array contains dirty blocks. Visiting stops as soon as the first
 * dirty block is seen.
 */
template <typename BlkType>
class CacheBlkIsDirtyVisitor
{
  public:
    CacheBlkIsDirtyVisitor()
        : foundDirty(false)
    {
    }

    /**
     * Visit one block.
     * @return True (keep scanning) while no dirty block has been seen;
     *         false (stop) as soon as one is.
     */
    bool operator()(BlkType &blk)
    {
        if (!blk.isDirty())
            return true;

        foundDirty = true;
        return false;
    }

    /**
     * Does the array contain a dirty line?
     *
     * \return true if yes, false otherwise.
     */
    bool isDirty() const { return foundDirty; }

  private:
    bool foundDirty;   //!< Latched once any visited block is dirty.
};
403
404 #endif //__CACHE_BLK_HH__