2 * Copyright (c) 2012-2013,2016,2018 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2003-2005 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * Authors: Erik Hallnor
46 * Declaration of a fully associative LRU tag store.
49 #ifndef __MEM_CACHE_TAGS_FA_LRU_HH__
50 #define __MEM_CACHE_TAGS_FA_LRU_HH__
55 #include <unordered_map>
57 #include "base/bitfield.hh"
58 #include "base/intmath.hh"
59 #include "base/logging.hh"
60 #include "base/statistics.hh"
61 #include "base/types.hh"
62 #include "mem/cache/blk.hh"
63 #include "mem/cache/tags/base.hh"
64 #include "mem/packet.hh"
65 #include "params/FALRU.hh"
67 // Uncomment to enable sanity checks for the FALRU cache and the
68 // TrackedCaches class
71 // A bitmask of the caches we are keeping track of. Currently the
72 // lowest bit is the smallest cache we are tracking, as it is
73 // specified by the corresponding parameter. The rest of the bits are
74 // for exponentially growing cache sizes.
75 typedef uint32_t CachesMask;
78 * A fully associative cache block.
80 class FALRUBlk : public CacheBlk
83 /** The previous block in LRU order. */
85 /** The next block in LRU order. */
88 /** A bit mask of the caches that fit this block. The lowest bit
 * corresponds to the smallest cache size being tracked (see the
 * CachesMask typedef above); the remaining bits stand for
 * exponentially growing cache sizes. */
89 CachesMask inCachesMask;
93 * A fully associative LRU cache. Keeps statistics for accesses to a number of
94 * cache sizes at once.
96 class FALRU : public BaseTags
99 /** Typedef the block type used in this class. */
100 typedef FALRUBlk BlkType;
103 /** The cache blocks. */
106 /** The MRU block. */
108 /** The LRU block. */
111 /** Hash table type mapping addresses to cache block pointers. */
112 typedef std::unordered_map<Addr, FALRUBlk *, std::hash<Addr> > hash_t;
113 /** Iterator into the address hash table. */
114 typedef hash_t::const_iterator tagIterator;
116 /** The address hash table. */
120 * Find the cache block for the given address.
121 * @param addr The address to find.
122 * @return The cache block of the address, if any.
124 FALRUBlk * hashLookup(Addr addr) const;
127 * Move a cache block to the MRU position.
129 * @param blk The block to promote.
131 void moveToHead(FALRUBlk *blk);
134 * Move a cache block to the LRU position.
136 * @param blk The block to demote.
138 void moveToTail(FALRUBlk *blk);
141 typedef FALRUParams Params;
144 * Construct and initialize this cache tagstore.
146 FALRU(const Params *p);
150 * Register the stats for this object.
152 void regStats() override;
155 * Invalidate a cache block.
156 * @param blk The block to invalidate.
158 void invalidate(CacheBlk *blk) override;
161 * Access block and update replacement data. May not succeed, in which
162 * case a null pointer is returned. This has all the implications of a
163 * cache access and should only be used as such.
164 * Returns the access latency and inCachesMask flags as a side effect.
165 * @param addr The address to look for.
166 * @param is_secure True if the target memory space is secure.
167 * @param lat The latency of the access.
168 * @param in_cache_mask Mask indicating the caches in which the blk fits.
169 * @return Pointer to the cache block.
171 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
172 CachesMask *in_cache_mask);
175 * Just a wrapper of above function to conform with the base interface.
177 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override;
180 * Find the block in the cache, do not update the replacement data.
181 * @param addr The address to look for.
182 * @param is_secure True if the target memory space is secure.
184 * @return Pointer to the cache block.
186 CacheBlk* findBlock(Addr addr, bool is_secure) const override;
189 * Find replacement victim based on address.
191 * @param addr Address to find a victim for.
192 * @return Cache block to be replaced.
194 CacheBlk* findVictim(Addr addr) override;
197 * Insert the new block into the cache and update replacement data.
199 * @param pkt Packet holding the address to update
200 * @param blk The block to update.
202 void insertBlock(PacketPtr pkt, CacheBlk *blk) override;
205 * Find the cache block given set and way
206 * @param set The set of the block.
207 * @param way The way of the block.
208 * @return The cache block.
210 CacheBlk* findBlockBySetAndWay(int set, int way) const override;
213 * Generate the tag from the address. For fully associative this is just the
215 * @param addr The address to get the tag from.
218 Addr extractTag(Addr addr) const override
220 return blkAlign(addr);
224 * Regenerate the block address from the tag.
226 * @param blk The block.
227 * @return the block address.
229 Addr regenerateBlkAddr(const CacheBlk* blk) const override
234 void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
235 for (int i = 0; i < numBlocks; i++) {
240 bool anyBlk(std::function<bool(CacheBlk &)> visitor) override {
241 for (int i = 0; i < numBlocks; i++) {
242 if (visitor(blks[i])) {
251 * Mechanism that allows us to simultaneously collect miss
252 * statistics for multiple caches. Currently, we keep track of
253 * caches from a set minimum size of interest up to the actual
259 CacheTracking(unsigned min_size, unsigned max_size,
261 : blkSize(block_size),
262 minTrackedSize(min_size),
263 numTrackedCaches(max_size > min_size ?
264 floorLog2(max_size) - floorLog2(min_size) : 0),
265 inAllCachesMask(mask(numTrackedCaches)),
266 boundaries(new FALRUBlk *[numTrackedCaches])
268 fatal_if(numTrackedCaches > sizeof(CachesMask) * 8,
269 "Not enough bits (%s) in type CachesMask type to keep "
270 "track of %d caches\n", sizeof(CachesMask),
280 * Initialize cache blocks and the tracking mechanism
282 * All blocks in the cache need to be initialized once.
284 * @param head the MRU block
285 * @param tail the LRU block
287 void init(FALRUBlk *head, FALRUBlk *tail);
290 * Update boundaries as a block will be moved to the MRU.
292 * For all caches that didn't fit the block before moving it,
293 * we move their boundaries one block closer to the MRU. We
294 * also update InCacheMasks as necessary.
296 * @param blk the block that will be moved to the head
298 void moveBlockToHead(FALRUBlk *blk);
301 * Update boundaries as a block will be moved to the LRU.
303 * For all caches that fitted the block before moving it, we
304 * move their boundaries one block closer to the LRU. We
305 * also update InCacheMasks as necessary.
307 * @param blk the block that will be moved to the tail
309 void moveBlockToTail(FALRUBlk *blk);
312 * Notify of a block access.
314 * This should be called every time a block is accessed and it
315 * updates statistics. If the input block is nullptr then we
316 * treat the access as a miss. The block's InCacheMask
317 * determines the caches in which the block fits.
319 * @param blk the block to record the access for
321 void recordAccess(FALRUBlk *blk);
324 * Check that the tracking mechanism is in consistent state.
326 * Iterate from the head (MRU) to the tail (LRU) of the list
327 * of blocks and assert the inCachesMask and the boundaries
328 * are in consistent state.
330 * @param head the MRU block of the actual cache
331 * @param tail the LRU block of the actual cache
333 void check(FALRUBlk *head, FALRUBlk *tail);
336 * Register the stats for this object.
338 void regStats(std::string name);
341 /** The size of the cache block */
342 const unsigned blkSize;
343 /** The smallest cache we are tracking */
344 const unsigned minTrackedSize;
345 /** The number of different size caches being tracked. */
346 const int numTrackedCaches;
347 /** A mask for all caches being tracked. */
348 const CachesMask inAllCachesMask;
349 /** Array of pointers to blocks at the cache boundaries. */
350 FALRUBlk** boundaries;
354 * @defgroup FALRUStats Fully Associative LRU specific statistics
355 * The FA lru stack lets us track multiple cache sizes at once. These
356 * statistics track the hits and misses for different cache sizes.
360 /** Hits in each cache */
362 /** Misses in each cache */
363 Stats::Vector misses;
364 /** Total number of accesses */
365 Stats::Scalar accesses;
371 CacheTracking cacheTracking;
374 #endif // __MEM_CACHE_TAGS_FA_LRU_HH__