mem-cache: Use secure flag in FALRU's findBlock
[gem5.git] / src / mem / cache / tags / fa_lru.cc
1 /*
2 * Copyright (c) 2013,2016-2018 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Erik Hallnor
41 * Nikos Nikoleris
42 */
43
44 /**
45 * @file
46 * Definitions a fully associative LRU tagstore.
47 */
48
49 #include "mem/cache/tags/fa_lru.hh"
50
51 #include <cassert>
52 #include <sstream>
53
54 #include "base/intmath.hh"
55 #include "base/logging.hh"
56
57 FALRU::FALRU(const Params *p)
58 : BaseTags(p),
59
60 cacheTracking(p->min_tracked_cache_size, size, blkSize)
61 {
62 if (!isPowerOf2(blkSize))
63 fatal("cache block size (in bytes) `%d' must be a power of two",
64 blkSize);
65 if (!isPowerOf2(size))
66 fatal("Cache Size must be power of 2 for now");
67
68 blks = new FALRUBlk[numBlocks];
69
70 head = &(blks[0]);
71 head->prev = nullptr;
72 head->next = &(blks[1]);
73 head->set = 0;
74 head->way = 0;
75 head->data = &dataBlks[0];
76
77 for (unsigned i = 1; i < numBlocks - 1; i++) {
78 blks[i].prev = &(blks[i-1]);
79 blks[i].next = &(blks[i+1]);
80 blks[i].set = 0;
81 blks[i].way = i;
82
83 // Associate a data chunk to the block
84 blks[i].data = &dataBlks[blkSize*i];
85 }
86
87 tail = &(blks[numBlocks - 1]);
88 tail->prev = &(blks[numBlocks - 2]);
89 tail->next = nullptr;
90 tail->set = 0;
91 tail->way = numBlocks - 1;
92 tail->data = &dataBlks[(numBlocks - 1) * blkSize];
93
94 cacheTracking.init(head, tail);
95 }
96
FALRU::~FALRU()
{
    // blks was allocated with new[] in the constructor
    delete[] blks;
}
101
void
FALRU::regStats()
{
    // Register the common tag statistics first, then the per-size
    // hit/miss statistics maintained by the cache tracking helper.
    BaseTags::regStats();
    cacheTracking.regStats(name());
}
108
109 FALRUBlk *
110 FALRU::hashLookup(Addr addr) const
111 {
112 tagIterator iter = tagHash.find(addr);
113 if (iter != tagHash.end()) {
114 return (*iter).second;
115 }
116 return nullptr;
117 }
118
119 void
120 FALRU::invalidate(CacheBlk *blk)
121 {
122 BaseTags::invalidate(blk);
123
124 // Move the block to the tail to make it the next victim
125 moveToTail((FALRUBlk*)blk);
126
127 // Erase block entry in the hash table
128 tagHash.erase(blk->tag);
129 }
130
131 CacheBlk*
132 FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
133 {
134 return accessBlock(addr, is_secure, lat, 0);
135 }
136
CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat,
                   CachesMask *in_caches_mask)
{
    // Look up the block for addr; on a hit, promote it to MRU, set lat
    // to the access latency (padded if the block is not yet ready) and
    // optionally report which tracked cache sizes would also have hit.
    // Both hits and misses are recorded in the tracking statistics.
    CachesMask mask = 0;
    Addr blkAddr = blkAlign(addr);
    FALRUBlk* blk = hashLookup(blkAddr);

    if (blk && blk->isValid()) {
        // If a cache hit
        lat = accessLatency;
        // Check if the block to be accessed is available. If not,
        // apply the accessLatency on top of block->whenReady.
        if (blk->whenReady > curTick() &&
            cache->ticksToCycles(blk->whenReady - curTick()) >
            accessLatency) {
            lat = cache->ticksToCycles(blk->whenReady - curTick()) +
                accessLatency;
        }
        assert(blk->tag == blkAddr);
        // Capture the mask before the move: it reflects the block's
        // position prior to this access
        mask = blk->inCachesMask;
        moveToHead(blk);
    } else {
        // If a cache miss
        lat = lookupLatency;
        blk = nullptr;
    }
    if (in_caches_mask) {
        *in_caches_mask = mask;
    }

    cacheTracking.recordAccess(blk);

    return blk;
}
172
173
174 CacheBlk*
175 FALRU::findBlock(Addr addr, bool is_secure) const
176 {
177 Addr blkAddr = blkAlign(addr);
178 FALRUBlk* blk = hashLookup(blkAddr);
179
180 if (blk && blk->isValid()) {
181 assert(blk->tag == blkAddr);
182 assert(blk->isSecure() == is_secure);
183 } else {
184 blk = nullptr;
185 }
186 return blk;
187 }
188
CacheBlk*
FALRU::findBlockBySetAndWay(int set, int way) const
{
    // A fully-associative cache has exactly one set (0); the way
    // index maps directly onto the block array.
    assert(set == 0);
    return &blks[way];
}
195
CacheBlk*
FALRU::findVictim(Addr addr)
{
    // Under full associativity with LRU replacement the victim is
    // always the tail of the list; the address plays no role.
    return tail;
}
201
202 void
203 FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
204 {
205 FALRUBlk* falruBlk = static_cast<FALRUBlk*>(blk);
206
207 // Make sure block is not present in the cache
208 assert(falruBlk->inCachesMask == 0);
209
210 // Do common block insertion functionality
211 BaseTags::insertBlock(pkt, blk);
212
213 // New block is the MRU
214 moveToHead(falruBlk);
215
216 // Insert new block in the hash table
217 tagHash[falruBlk->tag] = falruBlk;
218 }
219
void
FALRU::moveToHead(FALRUBlk *blk)
{
    // Unlink blk from its current list position and relink it as the
    // MRU (head). The tracking boundaries must be adjusted before the
    // list pointers change, since they depend on blk's old position.
    // If block is not already head, do the moving
    if (blk != head) {
        cacheTracking.moveBlockToHead(blk);
        // If block is tail, set previous block as new tail
        if (blk == tail){
            assert(blk->next == nullptr);
            tail = blk->prev;
            tail->next = nullptr;
            // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Swap pointers
        blk->next = head;
        blk->prev = nullptr;
        head->prev = blk;
        head = blk;

        // Sanity-check list/boundary consistency (FALRU_DEBUG only)
        cacheTracking.check(head, tail);
    }
}
246
void
FALRU::moveToTail(FALRUBlk *blk)
{
    // Unlink blk from its current list position and relink it as the
    // LRU (tail). The tracking boundaries must be adjusted before the
    // list pointers change, since they depend on blk's old position.
    // If block is not already tail, do the moving
    if (blk != tail) {
        cacheTracking.moveBlockToTail(blk);
        // If block is head, set next block as new head
        if (blk == head){
            assert(blk->prev == nullptr);
            head = blk->next;
            head->prev = nullptr;
            // Inform block's surrounding blocks that it has been moved
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }

        // Swap pointers
        blk->prev = tail;
        blk->next = nullptr;
        tail->next = blk;
        tail = blk;

        // Sanity-check list/boundary consistency (FALRU_DEBUG only)
        cacheTracking.check(head, tail);
    }
}
273
FALRU *
FALRUParams::create()
{
    // Factory hook invoked by the generated Python-config params
    // object to instantiate the tag store.
    return new FALRU(this);
}
279
280 void
281 FALRU::CacheTracking::check(FALRUBlk *head, FALRUBlk *tail)
282 {
283 #ifdef FALRU_DEBUG
284 FALRUBlk* blk = head;
285 unsigned curr_size = 0;
286 unsigned tracked_cache_size = minTrackedSize;
287 CachesMask in_caches_mask = inAllCachesMask;
288 int j = 0;
289
290 while (blk) {
291 panic_if(blk->inCachesMask != in_caches_mask, "Expected cache mask "
292 "%x found %x", blk->inCachesMask, in_caches_mask);
293
294 curr_size += blkSize;
295 if (curr_size == tracked_cache_size && blk != tail) {
296 panic_if(boundaries[j] != blk, "Unexpected boundary for the %d-th "
297 "cache", j);
298 tracked_cache_size <<= 1;
299 // from this point, blocks fit only in the larger caches
300 in_caches_mask &= ~(1U << j);
301 ++j;
302 }
303 blk = blk->next;
304 }
305 #endif // FALRU_DEBUG
306 }
307
void
FALRU::CacheTracking::init(FALRUBlk *head, FALRUBlk *tail)
{
    // Walk the LRU list once, stamping each block with the mask of
    // tracked cache sizes it currently fits in and recording the last
    // block that fits in each tracked size as that size's boundary.
    // early exit if we are not tracking any extra caches
    FALRUBlk* blk = numTrackedCaches ? head : nullptr;
    unsigned curr_size = 0;
    unsigned tracked_cache_size = minTrackedSize;
    CachesMask in_caches_mask = inAllCachesMask;
    int j = 0;

    while (blk) {
        blk->inCachesMask = in_caches_mask;

        curr_size += blkSize;
        if (curr_size == tracked_cache_size && blk != tail) {
            // blk is the last block fitting in the j-th tracked cache
            boundaries[j] = blk;

            tracked_cache_size <<= 1;
            // from this point, blocks fit only in the larger caches
            in_caches_mask &= ~(1U << j);
            ++j;
        }
        blk = blk->next;
    }
}
333
334
void
FALRU::CacheTracking::moveBlockToHead(FALRUBlk *blk)
{
    // Boundary bookkeeping for a block about to become MRU: every
    // tracked cache that did not hold the block gains it at the head,
    // pushing that cache's boundary one block towards the MRU.
    // Get the mask of all caches, in which the block didn't fit
    // before moving it to the head
    CachesMask update_caches_mask = inAllCachesMask ^ blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache didn't fit the block (before it is moved to
            // the head), move the ith boundary 1 block closer to the
            // MRU
            boundaries[i]->inCachesMask &= ~current_cache_mask;
            boundaries[i] = boundaries[i]->prev;
        } else if (boundaries[i] == blk) {
            // Make sure the boundary doesn't point to the block
            // we are about to move
            boundaries[i] = blk->prev;
        }
    }

    // Make block reside in all caches
    blk->inCachesMask = inAllCachesMask;
}
360
void
FALRU::CacheTracking::moveBlockToTail(FALRUBlk *blk)
{
    // Boundary bookkeeping for a block about to become LRU: every
    // tracked cache that held the block loses it, pulling that cache's
    // boundary one block towards the LRU.
    CachesMask update_caches_mask = blk->inCachesMask;

    for (int i = 0; i < numTrackedCaches; i++) {
        CachesMask current_cache_mask = 1U << i;
        if (current_cache_mask & update_caches_mask) {
            // if the ith cache fitted the block (before it is moved to
            // the tail), move the ith boundary 1 block closer to the
            // LRU
            boundaries[i] = boundaries[i]->next;
            if (boundaries[i] == blk) {
                // Make sure the boundary doesn't point to the block
                // we are about to move
                boundaries[i] = blk->next;
            }
            boundaries[i]->inCachesMask |= current_cache_mask;
        }
    }

    // The block now fits only in the actual cache
    blk->inCachesMask = 0;
}
385
386 void
387 FALRU::CacheTracking::recordAccess(FALRUBlk *blk)
388 {
389 for (int i = 0; i < numTrackedCaches; i++) {
390 if (blk && ((1U << i) & blk->inCachesMask)) {
391 hits[i]++;
392 } else {
393 misses[i]++;
394 }
395 }
396
397 // Record stats for the actual cache too
398 if (blk) {
399 hits[numTrackedCaches]++;
400 } else {
401 misses[numTrackedCaches]++;
402 }
403
404 accesses++;
405 }
406
void
printSize(std::ostream &stream, size_t size)
{
    // Binary (power-of-1024) unit suffixes. PB/EB replace the old
    // lone "ZB" entry, which mislabelled the 2^50 multiplier and
    // skipped peta/exa entirely; seven entries cover the full range
    // of a 64-bit size_t.
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "PB", "EB" };
    const size_t num_sizes = sizeof(SIZES) / sizeof(*SIZES);

    size_t div = 0;
    // Stop one short of the table length so SIZES[div] is always in
    // bounds; the previous bound let div reach num_sizes for sizes of
    // 1024^6 bytes and above, reading past the end of the array.
    while (size >= 1024 && div < num_sizes - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}
418
void
FALRU::CacheTracking::regStats(std::string name)
{
    // Register hit/miss/access statistics. Entry i of the hit and
    // miss vectors corresponds to a tracked cache of
    // minTrackedSize << i bytes; the final entry (numTrackedCaches)
    // is the actual cache itself.
    hits
        .init(numTrackedCaches + 1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numTrackedCaches + 1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    // Label each vector entry with its human-readable cache size
    for (unsigned i = 0; i < numTrackedCaches + 1; ++i) {
        std::stringstream size_str;
        printSize(size_str, minTrackedSize << i);
        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}