2 * Copyright (c) 2013,2016-2018 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * Authors: Erik Hallnor
46 * Definitions of a fully associative LRU tag store.
49 #include "mem/cache/tags/fa_lru.hh"
54 #include "base/intmath.hh"
55 #include "base/logging.hh"
57 FALRU::FALRU(const Params
*p
)
60 cacheTracking(p
->min_tracked_cache_size
, size
, blkSize
)
62 if (!isPowerOf2(blkSize
))
63 fatal("cache block size (in bytes) `%d' must be a power of two",
65 if (!isPowerOf2(size
))
66 fatal("Cache Size must be power of 2 for now");
68 blks
= new FALRUBlk
[numBlocks
];
72 head
->next
= &(blks
[1]);
75 head
->data
= &dataBlks
[0];
77 for (unsigned i
= 1; i
< numBlocks
- 1; i
++) {
78 blks
[i
].prev
= &(blks
[i
-1]);
79 blks
[i
].next
= &(blks
[i
+1]);
83 // Associate a data chunk to the block
84 blks
[i
].data
= &dataBlks
[blkSize
*i
];
87 tail
= &(blks
[numBlocks
- 1]);
88 tail
->prev
= &(blks
[numBlocks
- 2]);
91 tail
->way
= numBlocks
- 1;
92 tail
->data
= &dataBlks
[(numBlocks
- 1) * blkSize
];
94 cacheTracking
.init(head
, tail
);
105 BaseTags::regStats();
106 cacheTracking
.regStats(name());
110 FALRU::hashLookup(Addr addr
) const
112 tagIterator iter
= tagHash
.find(addr
);
113 if (iter
!= tagHash
.end()) {
114 return (*iter
).second
;
120 FALRU::invalidate(CacheBlk
*blk
)
122 BaseTags::invalidate(blk
);
124 // Move the block to the tail to make it the next victim
125 moveToTail((FALRUBlk
*)blk
);
127 // Erase block entry in the hash table
128 tagHash
.erase(blk
->tag
);
132 FALRU::accessBlock(Addr addr
, bool is_secure
, Cycles
&lat
)
134 return accessBlock(addr
, is_secure
, lat
, 0);
138 FALRU::accessBlock(Addr addr
, bool is_secure
, Cycles
&lat
,
139 CachesMask
*in_caches_mask
)
142 Addr blkAddr
= blkAlign(addr
);
143 FALRUBlk
* blk
= hashLookup(blkAddr
);
145 if (blk
&& blk
->isValid()) {
148 // Check if the block to be accessed is available. If not,
149 // apply the accessLatency on top of block->whenReady.
150 if (blk
->whenReady
> curTick() &&
151 cache
->ticksToCycles(blk
->whenReady
- curTick()) >
153 lat
= cache
->ticksToCycles(blk
->whenReady
- curTick()) +
156 assert(blk
->tag
== blkAddr
);
157 mask
= blk
->inCachesMask
;
164 if (in_caches_mask
) {
165 *in_caches_mask
= mask
;
168 cacheTracking
.recordAccess(blk
);
175 FALRU::findBlock(Addr addr
, bool is_secure
) const
177 Addr blkAddr
= blkAlign(addr
);
178 FALRUBlk
* blk
= hashLookup(blkAddr
);
180 if (blk
&& blk
->isValid()) {
181 assert(blk
->tag
== blkAddr
);
182 assert(blk
->isSecure() == is_secure
);
190 FALRU::findBlockBySetAndWay(int set
, int way
) const
197 FALRU::findVictim(Addr addr
)
203 FALRU::insertBlock(PacketPtr pkt
, CacheBlk
*blk
)
205 FALRUBlk
* falruBlk
= static_cast<FALRUBlk
*>(blk
);
207 // Make sure block is not present in the cache
208 assert(falruBlk
->inCachesMask
== 0);
210 // Do common block insertion functionality
211 BaseTags::insertBlock(pkt
, blk
);
213 // New block is the MRU
214 moveToHead(falruBlk
);
216 // Insert new block in the hash table
217 tagHash
[falruBlk
->tag
] = falruBlk
;
221 FALRU::moveToHead(FALRUBlk
*blk
)
223 // If block is not already head, do the moving
225 cacheTracking
.moveBlockToHead(blk
);
226 // If block is tail, set previous block as new tail
228 assert(blk
->next
== nullptr);
230 tail
->next
= nullptr;
231 // Inform block's surrounding blocks that it has been moved
233 blk
->prev
->next
= blk
->next
;
234 blk
->next
->prev
= blk
->prev
;
243 cacheTracking
.check(head
, tail
);
248 FALRU::moveToTail(FALRUBlk
*blk
)
250 // If block is not already tail, do the moving
252 cacheTracking
.moveBlockToTail(blk
);
253 // If block is head, set next block as new head
255 assert(blk
->prev
== nullptr);
257 head
->prev
= nullptr;
258 // Inform block's surrounding blocks that it has been moved
260 blk
->prev
->next
= blk
->next
;
261 blk
->next
->prev
= blk
->prev
;
270 cacheTracking
.check(head
, tail
);
275 FALRUParams::create()
277 return new FALRU(this);
281 FALRU::CacheTracking::check(FALRUBlk
*head
, FALRUBlk
*tail
)
284 FALRUBlk
* blk
= head
;
285 unsigned curr_size
= 0;
286 unsigned tracked_cache_size
= minTrackedSize
;
287 CachesMask in_caches_mask
= inAllCachesMask
;
291 panic_if(blk
->inCachesMask
!= in_caches_mask
, "Expected cache mask "
292 "%x found %x", blk
->inCachesMask
, in_caches_mask
);
294 curr_size
+= blkSize
;
295 if (curr_size
== tracked_cache_size
&& blk
!= tail
) {
296 panic_if(boundaries
[j
] != blk
, "Unexpected boundary for the %d-th "
298 tracked_cache_size
<<= 1;
299 // from this point, blocks fit only in the larger caches
300 in_caches_mask
&= ~(1U << j
);
305 #endif // FALRU_DEBUG
309 FALRU::CacheTracking::init(FALRUBlk
*head
, FALRUBlk
*tail
)
311 // early exit if we are not tracking any extra caches
312 FALRUBlk
* blk
= numTrackedCaches
? head
: nullptr;
313 unsigned curr_size
= 0;
314 unsigned tracked_cache_size
= minTrackedSize
;
315 CachesMask in_caches_mask
= inAllCachesMask
;
319 blk
->inCachesMask
= in_caches_mask
;
321 curr_size
+= blkSize
;
322 if (curr_size
== tracked_cache_size
&& blk
!= tail
) {
325 tracked_cache_size
<<= 1;
326 // from this point, blocks fit only in the larger caches
327 in_caches_mask
&= ~(1U << j
);
336 FALRU::CacheTracking::moveBlockToHead(FALRUBlk
*blk
)
338 // Get the mask of all caches, in which the block didn't fit
339 // before moving it to the head
340 CachesMask update_caches_mask
= inAllCachesMask
^ blk
->inCachesMask
;
342 for (int i
= 0; i
< numTrackedCaches
; i
++) {
343 CachesMask current_cache_mask
= 1U << i
;
344 if (current_cache_mask
& update_caches_mask
) {
345 // if the ith cache didn't fit the block (before it is moved to
346 // the head), move the ith boundary 1 block closer to the
348 boundaries
[i
]->inCachesMask
&= ~current_cache_mask
;
349 boundaries
[i
] = boundaries
[i
]->prev
;
350 } else if (boundaries
[i
] == blk
) {
351 // Make sure the boundary doesn't point to the block
352 // we are about to move
353 boundaries
[i
] = blk
->prev
;
357 // Make block reside in all caches
358 blk
->inCachesMask
= inAllCachesMask
;
362 FALRU::CacheTracking::moveBlockToTail(FALRUBlk
*blk
)
364 CachesMask update_caches_mask
= blk
->inCachesMask
;
366 for (int i
= 0; i
< numTrackedCaches
; i
++) {
367 CachesMask current_cache_mask
= 1U << i
;
368 if (current_cache_mask
& update_caches_mask
) {
369 // if the ith cache fitted the block (before it is moved to
370 // the tail), move the ith boundary 1 block closer to the
372 boundaries
[i
] = boundaries
[i
]->next
;
373 if (boundaries
[i
] == blk
) {
374 // Make sure the boundary doesn't point to the block
375 // we are about to move
376 boundaries
[i
] = blk
->next
;
378 boundaries
[i
]->inCachesMask
|= current_cache_mask
;
382 // The block now fits only in the actual cache
383 blk
->inCachesMask
= 0;
387 FALRU::CacheTracking::recordAccess(FALRUBlk
*blk
)
389 for (int i
= 0; i
< numTrackedCaches
; i
++) {
390 if (blk
&& ((1U << i
) & blk
->inCachesMask
)) {
397 // Record stats for the actual cache too
399 hits
[numTrackedCaches
]++;
401 misses
[numTrackedCaches
]++;
/**
 * Write a byte count to a stream using a human-readable binary unit
 * (e.g. 2048 -> "2kB").
 *
 * @param stream Output stream to write the formatted size to.
 * @param size   Size in bytes.
 */
void
printSize(std::ostream &stream, size_t size)
{
    static const char *SIZES[] = { "B", "kB", "MB", "GB", "TB", "ZB" };
    const size_t num_sizes = sizeof SIZES / sizeof *SIZES;
    size_t div = 0;
    // BUG FIX: the bound must be num_sizes - 1, not num_sizes.
    // Otherwise `div` can reach num_sizes (for sizes >= 1024^(num_sizes-1))
    // and SIZES[div] below reads past the end of the array.
    while (size >= 1024 && div < num_sizes - 1) {
        div++;
        size >>= 10;
    }
    // NOTE(review): the label sequence jumps from "TB" to "ZB",
    // skipping PB/EB; kept as-is since stat names may rely on it.
    stream << size << SIZES[div];
}
420 FALRU::CacheTracking::regStats(std::string name
)
423 .init(numTrackedCaches
+ 1)
424 .name(name
+ ".falru_hits")
425 .desc("The number of hits in each cache size.")
428 .init(numTrackedCaches
+ 1)
429 .name(name
+ ".falru_misses")
430 .desc("The number of misses in each cache size.")
433 .name(name
+ ".falru_accesses")
434 .desc("The number of accesses to the FA LRU cache.")
437 for (unsigned i
= 0; i
< numTrackedCaches
+ 1; ++i
) {
438 std::stringstream size_str
;
439 printSize(size_str
, minTrackedSize
<< i
);
440 hits
.subname(i
, size_str
.str());
441 hits
.subdesc(i
, "Hits in a " + size_str
.str() + " cache");
442 misses
.subname(i
, size_str
.str());
443 misses
.subdesc(i
, "Misses in a " + size_str
.str() + " cache");