2 * Copyright (c) 2018 Inria
3 * Copyright (c) 2013,2016-2018 ARM Limited
6 * The license below extends only to copyright in the software and shall
7 * not be construed as granting a license to any other intellectual
8 * property including but not limited to intellectual property relating
9 * to a hardware implementation of the functionality of the software
10 * licensed hereunder. You may use the software subject to the license
11 * terms below provided that you ensure that this notice is replicated
12 * unmodified and in its entirety in all distributions of the software,
13 * modified or unmodified, in source code or in binary form.
15 * Copyright (c) 2003-2005 The Regents of The University of Michigan
16 * All rights reserved.
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * Definitions of a fully associative LRU tag store.
47 #include "mem/cache/tags/fa_lru.hh"
52 #include "base/intmath.hh"
53 #include "base/logging.hh"
54 #include "mem/cache/base.hh"
55 #include "mem/cache/replacement_policies/replaceable_entry.hh"
58 FALRUBlk::print() const
60 return csprintf("%s inCachesMask: %#x", CacheBlk::print(), inCachesMask
);
63 FALRU::FALRU(const Params
&p
)
66 cacheTracking(p
.min_tracked_cache_size
, size
, blkSize
)
68 if (!isPowerOf2(blkSize
))
69 fatal("cache block size (in bytes) `%d' must be a power of two",
71 if (!isPowerOf2(size
))
72 fatal("Cache Size must be power of 2 for now");
74 blks
= new FALRUBlk
[numBlocks
];
87 head
->next
= &(blks
[1]);
88 head
->setPosition(0, 0);
89 head
->data
= &dataBlks
[0];
91 for (unsigned i
= 1; i
< numBlocks
- 1; i
++) {
92 blks
[i
].prev
= &(blks
[i
-1]);
93 blks
[i
].next
= &(blks
[i
+1]);
94 blks
[i
].setPosition(0, i
);
96 // Associate a data chunk to the block
97 blks
[i
].data
= &dataBlks
[blkSize
*i
];
100 tail
= &(blks
[numBlocks
- 1]);
101 tail
->prev
= &(blks
[numBlocks
- 2]);
102 tail
->next
= nullptr;
103 tail
->setPosition(0, numBlocks
- 1);
104 tail
->data
= &dataBlks
[(numBlocks
- 1) * blkSize
];
106 cacheTracking
.init(head
, tail
);
112 BaseTags::regStats();
113 cacheTracking
.regStats(name());
117 FALRU::invalidate(CacheBlk
*blk
)
119 // Erase block entry reference in the hash table
120 M5_VAR_USED
auto num_erased
=
121 tagHash
.erase(std::make_pair(blk
->getTag(), blk
->isSecure()));
123 // Sanity check; only one block reference should be erased
124 assert(num_erased
== 1);
126 // Invalidate block entry. Must be done after the hash is erased
127 BaseTags::invalidate(blk
);
129 // Decrease the number of tags in use
132 // Move the block to the tail to make it the next victim
133 moveToTail((FALRUBlk
*)blk
);
137 FALRU::accessBlock(Addr addr
, bool is_secure
, Cycles
&lat
)
139 return accessBlock(addr
, is_secure
, lat
, 0);
143 FALRU::accessBlock(Addr addr
, bool is_secure
, Cycles
&lat
,
144 CachesMask
*in_caches_mask
)
147 FALRUBlk
* blk
= static_cast<FALRUBlk
*>(findBlock(addr
, is_secure
));
150 if (blk
&& blk
->isValid()) {
151 mask
= blk
->inCachesMask
;
156 if (in_caches_mask
) {
157 *in_caches_mask
= mask
;
160 cacheTracking
.recordAccess(blk
);
162 // The tag lookup latency is the same for a hit or a miss
169 FALRU::findBlock(Addr addr
, bool is_secure
) const
171 FALRUBlk
* blk
= nullptr;
173 Addr tag
= extractTag(addr
);
174 auto iter
= tagHash
.find(std::make_pair(tag
, is_secure
));
175 if (iter
!= tagHash
.end()) {
176 blk
= (*iter
).second
;
179 if (blk
&& blk
->isValid()) {
180 assert(blk
->getTag() == tag
);
181 assert(blk
->isSecure() == is_secure
);
188 FALRU::findBlockBySetAndWay(int set
, int way
) const
195 FALRU::findVictim(Addr addr
, const bool is_secure
, const std::size_t size
,
196 std::vector
<CacheBlk
*>& evict_blks
)
198 // The victim is always stored on the tail for the FALRU
199 FALRUBlk
* victim
= tail
;
201 // There is only one eviction for this replacement
202 evict_blks
.push_back(victim
);
208 FALRU::insertBlock(const PacketPtr pkt
, CacheBlk
*blk
)
210 FALRUBlk
* falruBlk
= static_cast<FALRUBlk
*>(blk
);
212 // Make sure block is not present in the cache
213 assert(falruBlk
->inCachesMask
== 0);
215 // Do common block insertion functionality
216 BaseTags::insertBlock(pkt
, blk
);
218 // Increment tag counter
221 // New block is the MRU
222 moveToHead(falruBlk
);
224 // Insert new block in the hash table
225 tagHash
[std::make_pair(blk
->getTag(), blk
->isSecure())] = falruBlk
;
229 FALRU::moveToHead(FALRUBlk
*blk
)
231 // If block is not already head, do the moving
233 cacheTracking
.moveBlockToHead(blk
);
234 // If block is tail, set previous block as new tail
236 assert(blk
->next
== nullptr);
238 tail
->next
= nullptr;
239 // Inform block's surrounding blocks that it has been moved
241 blk
->prev
->next
= blk
->next
;
242 blk
->next
->prev
= blk
->prev
;
251 cacheTracking
.check(head
, tail
);
256 FALRU::moveToTail(FALRUBlk
*blk
)
258 // If block is not already tail, do the moving
260 cacheTracking
.moveBlockToTail(blk
);
261 // If block is head, set next block as new head
263 assert(blk
->prev
== nullptr);
265 head
->prev
= nullptr;
266 // Inform block's surrounding blocks that it has been moved
268 blk
->prev
->next
= blk
->next
;
269 blk
->next
->prev
= blk
->prev
;
278 cacheTracking
.check(head
, tail
);
283 FALRUParams::create() const
285 return new FALRU(*this);
289 FALRU::CacheTracking::check(const FALRUBlk
*head
, const FALRUBlk
*tail
) const
292 const FALRUBlk
* blk
= head
;
293 unsigned curr_size
= 0;
294 unsigned tracked_cache_size
= minTrackedSize
;
295 CachesMask in_caches_mask
= inAllCachesMask
;
299 panic_if(blk
->inCachesMask
!= in_caches_mask
, "Expected cache mask "
300 "%x found %x", blk
->inCachesMask
, in_caches_mask
);
302 curr_size
+= blkSize
;
303 if (curr_size
== tracked_cache_size
&& blk
!= tail
) {
304 panic_if(boundaries
[j
] != blk
, "Unexpected boundary for the %d-th "
306 tracked_cache_size
<<= 1;
307 // from this point, blocks fit only in the larger caches
308 in_caches_mask
&= ~(1U << j
);
313 #endif // FALRU_DEBUG
317 FALRU::CacheTracking::init(FALRUBlk
*head
, FALRUBlk
*tail
)
319 // early exit if we are not tracking any extra caches
320 FALRUBlk
* blk
= numTrackedCaches
? head
: nullptr;
321 unsigned curr_size
= 0;
322 unsigned tracked_cache_size
= minTrackedSize
;
323 CachesMask in_caches_mask
= inAllCachesMask
;
327 blk
->inCachesMask
= in_caches_mask
;
329 curr_size
+= blkSize
;
330 if (curr_size
== tracked_cache_size
&& blk
!= tail
) {
333 tracked_cache_size
<<= 1;
334 // from this point, blocks fit only in the larger caches
335 in_caches_mask
&= ~(1U << j
);
344 FALRU::CacheTracking::moveBlockToHead(FALRUBlk
*blk
)
346 // Get the mask of all caches, in which the block didn't fit
347 // before moving it to the head
348 CachesMask update_caches_mask
= inAllCachesMask
^ blk
->inCachesMask
;
350 for (int i
= 0; i
< numTrackedCaches
; i
++) {
351 CachesMask current_cache_mask
= 1U << i
;
352 if (current_cache_mask
& update_caches_mask
) {
353 // if the ith cache didn't fit the block (before it is moved to
354 // the head), move the ith boundary 1 block closer to the
356 boundaries
[i
]->inCachesMask
&= ~current_cache_mask
;
357 boundaries
[i
] = boundaries
[i
]->prev
;
358 } else if (boundaries
[i
] == blk
) {
359 // Make sure the boundary doesn't point to the block
360 // we are about to move
361 boundaries
[i
] = blk
->prev
;
365 // Make block reside in all caches
366 blk
->inCachesMask
= inAllCachesMask
;
370 FALRU::CacheTracking::moveBlockToTail(FALRUBlk
*blk
)
372 CachesMask update_caches_mask
= blk
->inCachesMask
;
374 for (int i
= 0; i
< numTrackedCaches
; i
++) {
375 CachesMask current_cache_mask
= 1U << i
;
376 if (current_cache_mask
& update_caches_mask
) {
377 // if the ith cache fitted the block (before it is moved to
378 // the tail), move the ith boundary 1 block closer to the
380 boundaries
[i
] = boundaries
[i
]->next
;
381 if (boundaries
[i
] == blk
) {
382 // Make sure the boundary doesn't point to the block
383 // we are about to move
384 boundaries
[i
] = blk
->next
;
386 boundaries
[i
]->inCachesMask
|= current_cache_mask
;
390 // The block now fits only in the actual cache
391 blk
->inCachesMask
= 0;
395 FALRU::CacheTracking::recordAccess(FALRUBlk
*blk
)
397 for (int i
= 0; i
< numTrackedCaches
; i
++) {
398 if (blk
&& ((1U << i
) & blk
->inCachesMask
)) {
405 // Record stats for the actual cache too
406 if (blk
&& blk
->isValid()) {
407 hits
[numTrackedCaches
]++;
409 misses
[numTrackedCaches
]++;
/**
 * Pretty-print a byte count with binary (power-of-1024) unit suffixes,
 * e.g. 2048 -> "2kB".
 *
 * @param stream Output stream the formatted size is written to.
 * @param size Size in bytes.
 */
void
printSize(std::ostream &stream, size_t size)
{
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "ZB" };
    const int num_suffixes = sizeof SIZES / sizeof *SIZES;

    int div = 0;
    // Stop at the last suffix so SIZES[div] can never be indexed out of
    // bounds. The previous bound (div < num_suffixes) let div reach
    // num_suffixes for sizes >= 1024^6, reading past the array.
    while (size >= 1024 && div < num_suffixes - 1) {
        div++;
        size >>= 10;
    }
    stream << size << SIZES[div];
}
428 FALRU::CacheTracking::regStats(std::string name
)
431 .init(numTrackedCaches
+ 1)
432 .name(name
+ ".falru_hits")
433 .desc("The number of hits in each cache size.")
436 .init(numTrackedCaches
+ 1)
437 .name(name
+ ".falru_misses")
438 .desc("The number of misses in each cache size.")
441 .name(name
+ ".falru_accesses")
442 .desc("The number of accesses to the FA LRU cache.")
445 for (unsigned i
= 0; i
< numTrackedCaches
+ 1; ++i
) {
446 std::stringstream size_str
;
447 printSize(size_str
, minTrackedSize
<< i
);
448 hits
.subname(i
, size_str
.str());
449 hits
.subdesc(i
, "Hits in a " + size_str
.str() + " cache");
450 misses
.subname(i
, size_str
.str());
451 misses
.subdesc(i
, "Misses in a " + size_str
.str() + " cache");