/*
 * Copyright (c) 2018 Inria
 * Copyright (c) 2013,2016-2018 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Definitions of a fully associative LRU tagstore.
 */

#include "mem/cache/tags/fa_lru.hh"

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/logging.hh"
#include "mem/cache/base.hh"
#include "mem/cache/replacement_policies/replaceable_entry.hh"

std::string
FALRUBlk::print() const
{
    return csprintf("%s inCachesMask: %#x", CacheBlk::print(), inCachesMask);
}

FALRU::FALRU(const Params &p)
    : BaseTags(p),
      cacheTracking(p.min_tracked_cache_size, size, blkSize)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!isPowerOf2(size))
        fatal("cache size (in bytes) `%d' must be a power of two for now",
              size);

    blks = new FALRUBlk[numBlocks];
}

FALRU::~FALRU()
{
    delete[] blks;
}

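// Chain all blocks into a single doubly-linked list ordered from MRU
// (head) to LRU (tail). FALRU behaves as one large set, so every block
// is assigned set 0 and its way is simply its index into the blks array.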
void
FALRU::tagsInit()
{
    head = &(blks[0]);
    head->prev = nullptr;
    head->next = &(blks[1]);
    head->setPosition(0, 0);
    head->data = &dataBlks[0];

    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].setPosition(0, i);

        // Associate a data chunk to the block
        blks[i].data = &dataBlks[blkSize*i];
    }

    tail = &(blks[numBlocks - 1]);
    tail->prev = &(blks[numBlocks - 2]);
    tail->next = nullptr;
    tail->setPosition(0, numBlocks - 1);
    tail->data = &dataBlks[(numBlocks - 1) * blkSize];

    cacheTracking.init(head, tail);
}

void
FALRU::regStats()
{
    BaseTags::regStats();
    cacheTracking.regStats(name());
}

void
FALRU::invalidate(CacheBlk *blk)
{
    // Erase block entry reference in the hash table
    M5_VAR_USED auto num_erased =
        tagHash.erase(std::make_pair(blk->getTag(), blk->isSecure()));

    // Sanity check; only one block reference should be erased
    assert(num_erased == 1);

    // Invalidate block entry. Must be done after the hash is erased
    BaseTags::invalidate(blk);

    // Decrease the number of tags in use
    stats.tagsInUse--;

    // Move the block to the tail to make it the next victim
    moveToTail((FALRUBlk*)blk);
}

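// Two accessBlock() overloads are provided: the second additionally
// reports, via in_caches_mask, which of the tracked (smaller) cache
// sizes would also have held the block at the time of the access.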
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
{
    return accessBlock(addr, is_secure, lat, nullptr);
}

CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    CachesMask mask = 0;
    FALRUBlk* blk = static_cast<FALRUBlk*>(findBlock(addr, is_secure));

    // If a cache hit
    if (blk && blk->isValid()) {
        mask = blk->inCachesMask;

        moveToHead(blk);
    }

    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    // The tag lookup latency is the same for a hit or a miss
    lat = lookupLatency;

    return blk;
}

CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
    FALRUBlk* blk = nullptr;

    Addr tag = extractTag(addr);
    auto iter = tagHash.find(std::make_pair(tag, is_secure));
    if (iter != tagHash.end()) {
        blk = iter->second;
    }

    if (blk && blk->isValid()) {
        assert(blk->getTag() == tag);
        assert(blk->isSecure() == is_secure);
    }

    return blk;
}

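// The cache is fully associative, so there is only one set; the way
// index maps directly onto the blks array.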
ReplaceableEntry*
FALRU::findBlockBySetAndWay(int set, int way) const
{
    assert(set == 0);
    return &blks[way];
}

CacheBlk*
FALRU::findVictim(Addr addr, const bool is_secure, const std::size_t size,
                  std::vector<CacheBlk*>& evict_blks)
{
    // The victim is always stored on the tail for the FALRU
    FALRUBlk* victim = tail;

    // There is only one eviction for this replacement
    evict_blks.push_back(victim);

    return victim;
}

void
FALRU::insertBlock(const PacketPtr pkt, CacheBlk *blk)
{
    FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);

    // Make sure block is not present in the cache
    assert(falruBlk->inCachesMask == 0);

    // Do common block insertion functionality
    BaseTags::insertBlock(pkt, blk);

    // Increment tag counter
    stats.tagsInUse++;

    // New block is the MRU
    moveToHead(falruBlk);

    // Insert new block in the hash table
    tagHash[std::make_pair(blk->getTag(), blk->isSecure())] = falruBlk;
}

void
FALRU::moveToHead(FALRUBlk *blk)
{
    // If block is not already head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If block is tail, set previous block as new tail
        if (blk == tail) {
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Insert the block at the head of the list
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        cacheTracking.check(head, tail);
    }
}

void
FALRU::moveToTail(FALRUBlk *blk)
{
    // If block is not already tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If block is head, set next block as new head
        if (blk == head) {
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
        // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Insert the block at the tail of the list
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        cacheTracking.check(head, tail);
    }
}

FALRU *
FALRUParams::create() const
{
    return new FALRU(*this);
}

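// Sanity check of the LRU list and the per-size boundary pointers. Only
// compiled in when FALRU_DEBUG is defined: it walks the list from MRU to
// LRU and verifies that every block's inCachesMask matches the boundaries
// recorded for the tracked cache sizes.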
void
FALRU::CacheTracking::check(const FALRUBlk *head, const FALRUBlk *tail) const
{
#ifdef FALRU_DEBUG
    const FALRUBlk* blk = head;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
                 "%x found %x", in_caches_mask, blk->inCachesMask);

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
                     "cache", j);
            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
#endif // FALRU_DEBUG
}

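// Set up the tracking state: boundaries[i] is made to point at the last
// (least recently used) block that still fits in the i-th tracked cache
// size, and each block's inCachesMask records which of those sizes would
// currently contain it.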
void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}

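// A block moving to the MRU position now fits in every tracked cache
// size; each boundary whose cache did not previously contain the block
// shifts one position towards the MRU so that the simulated cache keeps
// its capacity constant.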
void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Get the mask of all caches, in which the block didn't fit
    // before moving it to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache didn't fit the block (before it is moved to
            // the head), move the ith boundary 1 block closer to the
            // MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // Make block reside in all caches
    blk->inCachesMask = inAllCachesMask;
}

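// A block moving to the LRU position drops out of every tracked cache
// size that held it; each such boundary advances one position towards
// the LRU, and the block it now points at gains that cache's bit in its
// inCachesMask.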
void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    CachesMask update_caches_mask = blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache fitted the block (before it is moved to
            // the tail), move the ith boundary 1 block closer to the
            // LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}

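// Update the hit/miss counters for every tracked cache size as well as
// for the actual cache (the extra entry at index numTrackedCaches).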
void
FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
{
    for (int i = 0; i < numTrackedCaches; i++) {
        if (blk && ((1U << i) & blk->inCachesMask)) {
            hits[i]++;
        } else {
            misses[i]++;
        }
    }

    // Record stats for the actual cache too
    if (blk && blk->isValid()) {
        hits[numTrackedCaches]++;
    } else {
        misses[numTrackedCaches]++;
    }

    accesses++;
}

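// Helper that prints a byte count using the largest suitable unit, e.g.
// 32768 becomes "32kB"; used to label the per-size statistics below.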
void
printSize(std::ostream &stream, size_t size)
{
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB", "EB" };
    static const int num_sizes = sizeof(SIZES) / sizeof(SIZES[0]);
    int div = 0;
    // Scale down by powers of 1024 while a larger unit remains available;
    // the bound keeps SIZES[div] in range.
    while (size >= 1024 && div < num_sizes - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}

void
FALRU::CacheTracking::regStats(std::string name)
{
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}