2 * Copyright (c) 2012-2014 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * Authors: Erik Hallnor
45 * Declaration of a base set associative tag store.
48 #ifndef __MEM_CACHE_TAGS_BASE_SET_ASSOC_HH__
49 #define __MEM_CACHE_TAGS_BASE_SET_ASSOC_HH__
56 #include "mem/cache/base.hh"
57 #include "mem/cache/blk.hh"
58 #include "mem/cache/tags/base.hh"
59 #include "mem/cache/tags/cacheset.hh"
60 #include "mem/packet.hh"
61 #include "params/BaseSetAssoc.hh"
64 * A BaseSetAssoc cache tag store.
65 * @sa \ref gem5MemorySystem "gem5 Memory System"
67 * The BaseSetAssoc tags provide a base, as well as the functionality
68 * common to any set associative tags. Any derived class must implement
 * the methods related to the specifics of the actual replacement policy.
72 * BlkType* accessBlock();
73 * BlkType* findVictim();
class BaseSetAssoc : public BaseTags
    // NOTE(review): this excerpt is a sampled dump — the opening brace and
    // access-specifier lines of the class body are not visible here.

    /** Typedef the block type used in this tag store. */
    typedef CacheBlk BlkType;
    /** Typedef the set type used in this tag store. */
    typedef CacheSet<CacheBlk> SetType;

    /** The associativity of the cache. */
    // NOTE(review): the member declaration this documents is elided from
    // this excerpt.
    /** The allocatable associativity of the cache (alloc mask). */
    // NOTE(review): the member declaration this documents (used below as
    // `allocAssoc`) is elided from this excerpt.

    /** The cache blocks. */
    std::vector<BlkType> blks;
    /** The data blocks, 1 per cache block. */
    std::unique_ptr<uint8_t[]> dataBlks;

    /** The number of sets in the cache. */
    const unsigned numSets;

    /** Whether tags and data are accessed sequentially. */
    const bool sequentialAccess;

    /** The cache sets. */
    std::vector<SetType> sets;

    /** The amount to shift the address to get the set. */
    /** The amount to shift the address to get the tag. */
    /** Mask out all bits that aren't part of the set index. */
    // NOTE(review): the setShift/tagShift/setMask declarations documented
    // above are elided from this excerpt; they are used by extractSet(),
    // extractTag() and regenerateBlkAddr() below.

    /** Convenience typedef. */
    typedef BaseSetAssocParams Params;

    /**
     * Construct and initialize this tag store.
     * @param p The parameter object for this tag store.
     */
    BaseSetAssoc(const Params *p);

    /**
     * Destructor. All storage (blks, sets, dataBlks) is owned by value,
     * so there is nothing to release explicitly.
     */
    virtual ~BaseSetAssoc() {};
    /**
     * Find the cache block given set and way.
     * @param set The set of the block.
     * @param way The way of the block.
     * @return The cache block.
     */
    CacheBlk *findBlockBySetAndWay(int set, int way) const override;
    /**
     * Invalidate the given block: roll back its per-master occupancy
     * accounting and reset the block's ownership/bookkeeping fields.
     * @param blk The block to invalidate. Must currently be valid.
     */
    void invalidate(CacheBlk *blk) override
        // NOTE(review): the opening brace and one or two statements of
        // this body are elided from this excerpt.
        assert(blk->isValid());

        // The block must be charged to a known master before we can
        // decrement that master's occupancy count.
        assert(blk->srcMasterId < cache->system->maxMasters());
        occupancies[blk->srcMasterId]--;

        // Return the block to an unowned state.
        blk->srcMasterId = Request::invldMasterId;
        blk->task_id = ContextSwitchTaskId::Unknown;
        blk->tickInserted = curTick();
    /**
     * Access block and update replacement data. May not succeed, in which case
     * nullptr is returned. This has all the implications of a cache
     * access and should only be used as such. Returns the access latency as a
     * side effect (via the @p lat out-parameter).
     * @param addr The address to find.
     * @param is_secure True if the target memory space is secure.
     * @param lat The access latency.
     * @return Pointer to the cache block if found.
     */
    CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override
        // NOTE(review): several lines of this body (braces and parts of the
        // hit/miss latency handling) are elided from this excerpt; only the
        // surviving statements are shown below.
        BlkType *blk = findBlock(addr, is_secure);

        // Access all tags in parallel, hence one in each way. The data side
        // either accesses all blocks in parallel, or one block sequentially on
        // a hit. Sequential access with a miss doesn't access data.
        tagAccesses += allocAssoc;
        if (sequentialAccess) {
            if (blk != nullptr) {
        // NOTE(review): elided lines — on the parallel-access path all ways'
        // data is charged:
        dataAccesses += allocAssoc;
        // NOTE(review): elided lines.
        if (blk != nullptr) {
            // Check if the block to be accessed is available. If not,
            // apply the accessLatency on top of block->whenReady.
            if (blk->whenReady > curTick() &&
                cache->ticksToCycles(blk->whenReady - curTick()) >
        // NOTE(review): elided line (comparison RHS).
                lat = cache->ticksToCycles(blk->whenReady - curTick()) +
    /**
     * Finds the given address in the cache, do not update replacement data.
     * i.e. This is a no-side-effect find of a block.
     * (Stale "@param asid" removed: the signature takes no asid argument.)
     * @param addr The address to find.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the cache block if found.
     */
    CacheBlk* findBlock(Addr addr, bool is_secure) const override;
    /**
     * Find an invalid block to evict for the address provided.
     * If there are no invalid blocks, this will return the block
     * in the least-recently-used position.
     * @param addr The addr to find a replacement candidate for.
     * @return The candidate block.
     */
    CacheBlk* findVictim(Addr addr) override
        // NOTE(review): the loop's early-exit check, the closing braces and
        // the return statement are elided from this excerpt.
        BlkType *blk = nullptr;
        int set = extractSet(addr);

        // prefer to evict an invalid block
        for (int i = 0; i < allocAssoc; ++i) {
            blk = sets[set].blks[i];
    /**
     * Insert the new block into the cache.
     * @param pkt Packet holding the address to update
     * @param blk The block to update.
     */
    void insertBlock(PacketPtr pkt, CacheBlk *blk) override
        // NOTE(review): braces and several statements (warm-up flag update,
        // replaced-block stats/invalidation, tag/data access counters) are
        // elided from this excerpt; the surviving statements are shown below.
        Addr addr = pkt->getAddr();
        MasterID master_id = pkt->req->masterId();
        uint32_t task_id = pkt->req->taskId();

        // First touch of this block: mark it and check the warm-up bound.
        if (!blk->isTouched) {
            blk->isTouched = true;
            if (!warmedUp && tagsInUse.value() >= warmupBound) {
                warmupCycle = curTick();

        // If we're replacing a block that was previously valid update
        // stats for it. This can't be done in findBlock() because a
        // found block might not actually be replaced there if the
        // coherence protocol says it can't be.
        if (blk->isValid()) {
            totalRefs += blk->refCount;

        blk->isTouched = true;

        // Set tag for new block. Caller is responsible for setting status.
        blk->tag = extractTag(addr);

        // deal with what we are bringing in
        assert(master_id < cache->system->maxMasters());
        occupancies[master_id]++;
        blk->srcMasterId = master_id;
        blk->task_id = task_id;
        blk->tickInserted = curTick();

        // We only need to write into one tag and one data block.
    /**
     * Limit the allocation for the cache ways.
     * @param ways The maximum number of ways available for replacement.
     */
    virtual void setWayAllocationMax(int ways) override
        // NOTE(review): the braces and the statement that stores `ways`
        // are elided from this excerpt; only the sanity check survives.
        fatal_if(ways < 1, "Allocation limit must be greater than zero");
    /**
     * Get the way allocation mask limit.
     * @return The maximum number of ways available for replacement.
     */
    virtual int getWayAllocationMax() const override
        // NOTE(review): the body is elided from this excerpt — presumably
        // it returns the allocatable associativity; confirm against the
        // full source.
300 * Generate the tag from the given address.
301 * @param addr The address to get the tag from.
302 * @return The tag of the address.
304 Addr extractTag(Addr addr) const override
306 return (addr >> tagShift);
310 * Calculate the set index from the address.
311 * @param addr The address to get the set from.
312 * @return The set index of the address.
314 int extractSet(Addr addr) const override
316 return ((addr >> setShift) & setMask);
320 * Regenerate the block address from the tag.
321 * @param tag The tag of the block.
322 * @param set The set of the block.
323 * @return The block address.
325 Addr regenerateBlkAddr(Addr tag, unsigned set) const override
327 return ((tag << tagShift) | ((Addr)set << setShift));
    /**
     * Called at end of simulation to complete average block reference stats.
     */
    void cleanupRefs() override;

    /**
     * Print all tags used.
     */
    std::string print() const override;

    /**
     * Called prior to dumping stats to compute task occupancy.
     */
    void computeStats() override;
    /**
     * Visit each block in the tag store and apply a visitor to the
     * block.
     *
     * The visitor should be a function (or object that behaves like a
     * function) that takes a cache block reference as its parameter
     * and returns a bool. A visitor can request the traversal to be
     * stopped by returning false, returning true causes it to be
     * called for the next block in the tag store.
     *
     * \param visitor Visitor to call on each block.
     */
    void forEachBlk(CacheBlkVisitor &visitor) override {
        // Blocks live in one flat array of numSets * assoc entries.
        for (unsigned i = 0; i < numSets * assoc; ++i) {
            if (!visitor(blks[i]))
        // NOTE(review): the early-exit statement and the closing braces are
        // elided from this excerpt.
365 #endif //__MEM_CACHE_TAGS_BASE_SET_ASSOC_HH__