2 * Copyright (c) 2012-2014 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * Authors: Erik Hallnor
45 * Declaration of a base set associative tag store.
48 #ifndef __MEM_CACHE_TAGS_BASESETASSOC_HH__
49 #define __MEM_CACHE_TAGS_BASESETASSOC_HH__
55 #include "mem/cache/base.hh"
56 #include "mem/cache/blk.hh"
57 #include "mem/cache/tags/base.hh"
58 #include "mem/cache/tags/cacheset.hh"
59 #include "mem/packet.hh"
60 #include "params/BaseSetAssoc.hh"
63 * A BaseSetAssoc cache tag store.
64 * @sa \ref gem5MemorySystem "gem5 Memory System"
66 * The BaseSetAssoc tags provide a base, as well as the functionality
67 * common to any set associative tags. Any derived class must implement
 * the methods related to the specifics of the actual replacement policy.
71 * BlkType* accessBlock();
72 * BlkType* findVictim();
class BaseSetAssoc : public BaseTags
    /** Typedef the block type used in this tag store. */
    typedef CacheBlk BlkType;
    /** Typedef the set type used in this tag store. */
    typedef CacheSet<CacheBlk> SetType;

    /** The associativity of the cache. */
    /** The allocatable associativity of the cache (alloc mask).
     *  NOTE(review): presumably reduced at run time through
     *  setWayAllocationMax() -- confirm against the elided definition. */
    /** The number of sets in the cache. */
    const unsigned numSets;
    /** Whether tags and data are accessed sequentially: data is then
     *  read for a single way on a hit instead of for all ways in
     *  parallel (see the accounting in accessBlock()). */
    const bool sequentialAccess;

    /** The cache sets. */
    /** The cache blocks. */
    /** The data blocks, 1 per cache block. */

    /** The amount to shift the address to get the set. */
    /** The amount to shift the address to get the tag. */
    /** Mask out all bits that aren't part of the set index. */

    /** Convenience typedef for the parameter class. */
    typedef BaseSetAssocParams Params;
    /**
     * Construct and initialize this tag store.
     */
    BaseSetAssoc(const Params *p);

    /**
     * Destructor.
     */
    virtual ~BaseSetAssoc();

    /**
     * Find the cache block given set and way.
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The cache block.
     */
    CacheBlk *findBlockBySetAndWay(int set, int way) const override;
    /**
     * Invalidate the given block and clear its ownership metadata.
     * @param blk The block to invalidate.
     */
    void invalidate(CacheBlk *blk) override
        // Only a valid block may be invalidated.
        assert(blk->isValid());
        // Charge the occupancy decrease to the master that owned the block.
        assert(blk->srcMasterId < cache->system->maxMasters());
        occupancies[blk->srcMasterId]--;
        // Reset ownership/tracking metadata to "unowned" sentinel values.
        blk->srcMasterId = Request::invldMasterId;
        blk->task_id = ContextSwitchTaskId::Unknown;
        blk->tickInserted = curTick();
    /**
     * Access block and update replacement data. May not succeed, in which case
     * nullptr is returned. This has all the implications of a cache
     * access and should only be used as such. Returns the access latency as a
     * side effect through the @p lat reference parameter.
     * @param addr The address to find.
     * @param is_secure True if the target memory space is secure.
     * @param lat The access latency (output).
     * @return Pointer to the cache block if found.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override
        Addr tag = extractTag(addr);
        int set = extractSet(addr);
        BlkType *blk = sets[set].findBlk(tag, is_secure);

        // Access all tags in parallel, hence one in each way.  The data side
        // either accesses all blocks in parallel, or one block sequentially on
        // a hit.  Sequential access with a miss doesn't access data.
        tagAccesses += allocAssoc;
        if (sequentialAccess) {
            if (blk != nullptr) {
            // NOTE(review): this statement belongs to the (elided) else
            // branch -- parallel access touches data in every allocatable way.
            dataAccesses += allocAssoc;
        if (blk != nullptr) {
            // Check if the block to be accessed is available. If not,
            // apply the accessLatency on top of block->whenReady.
            if (blk->whenReady > curTick() &&
                cache->ticksToCycles(blk->whenReady - curTick()) >
                lat = cache->ticksToCycles(blk->whenReady - curTick()) +
    /**
     * Finds the given address in the cache, do not update replacement data.
     * i.e. This is a no-side-effect find of a block.
     * @param addr The address to find.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block if found.
     */
    CacheBlk* findBlock(Addr addr, bool is_secure) const override;
    /**
     * Find an invalid block to evict for the address provided.
     * If there are no invalid blocks, this will return the block
     * in the least-recently-used position.
     * @param addr The address to find a replacement candidate for.
     * @return The candidate block.
     */
    CacheBlk* findVictim(Addr addr) override
        BlkType *blk = nullptr;
        int set = extractSet(addr);

        // prefer to evict an invalid block; only the allocatable ways
        // (allocAssoc) are eligible as victims
        for (int i = 0; i < allocAssoc; ++i) {
            blk = sets[set].blks[i];
    /**
     * Insert the new block into the cache and update ownership stats.
     * @param pkt Packet holding the address to update
     * @param blk The block to update.
     */
    void insertBlock(PacketPtr pkt, CacheBlk *blk) override
        Addr addr = pkt->getAddr();
        MasterID master_id = pkt->req->masterId();
        uint32_t task_id = pkt->req->taskId();

        if (!blk->isTouched) {
            blk->isTouched = true;
            // Record the cycle at which the warm-up bound was reached.
            if (!warmedUp && tagsInUse.value() >= warmupBound) {
                warmupCycle = curTick();

        // If we're replacing a block that was previously valid update
        // stats for it. This can't be done in findBlock() because a
        // found block might not actually be replaced there if the
        // coherence protocol says it can't be.
        if (blk->isValid()) {
            totalRefs += blk->refCount;

            // deal with evicted block: charge the occupancy decrease to
            // the master that owned the victim
            assert(blk->srcMasterId < cache->system->maxMasters());
            occupancies[blk->srcMasterId]--;

        blk->isTouched = true;

        // Set tag for new block. Caller is responsible for setting status.
        blk->tag = extractTag(addr);

        // deal with what we are bringing in
        assert(master_id < cache->system->maxMasters());
        occupancies[master_id]++;
        blk->srcMasterId = master_id;
        blk->task_id = task_id;
        blk->tickInserted = curTick();

        // We only need to write into one tag and one data block.
    /**
     * Limit the allocation for the cache ways.
     * @param ways The maximum number of ways available for replacement.
     */
    virtual void setWayAllocationMax(int ways) override
        // A cache must always retain at least one allocatable way.
        fatal_if(ways < 1, "Allocation limit must be greater than zero");
    /**
     * Get the way allocation mask limit.
     * @return The maximum number of ways available for replacement.
     */
    virtual int getWayAllocationMax() const override
303 * Generate the tag from the given address.
304 * @param addr The address to get the tag from.
305 * @return The tag of the address.
307 Addr extractTag(Addr addr) const override
309 return (addr >> tagShift);
313 * Calculate the set index from the address.
314 * @param addr The address to get the set from.
315 * @return The set index of the address.
317 int extractSet(Addr addr) const override
319 return ((addr >> setShift) & setMask);
323 * Regenerate the block address from the tag.
324 * @param tag The tag of the block.
325 * @param set The set of the block.
326 * @return The block address.
328 Addr regenerateBlkAddr(Addr tag, unsigned set) const override
330 return ((tag << tagShift) | ((Addr)set << setShift));
    /**
     * Called at end of simulation to complete average block reference stats.
     */
    void cleanupRefs() override;

    /**
     * Print all tags used.
     */
    std::string print() const override;

    /**
     * Called prior to dumping stats to compute task occupancy.
     */
    void computeStats() override;
    /**
     * Visit each block in the tag store and apply a visitor to the
     * block.
     *
     * The visitor should be a function (or object that behaves like a
     * function) that takes a cache block reference as its parameter
     * and returns a bool. A visitor can request the traversal to be
     * stopped by returning false, returning true causes it to be
     * called for the next block in the tag store.
     *
     * \param visitor Visitor to call on each block.
     */
    void forEachBlk(CacheBlkVisitor &visitor) override {
        // Iterate over the full (not allocation-limited) block array.
        for (unsigned i = 0; i < numSets * assoc; ++i) {
            if (!visitor(blks[i]))
368 #endif // __MEM_CACHE_TAGS_BASESETASSOC_HH__