2 * Copyright (c) 2012-2014 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 * Authors: Erik Hallnor
45 * Declaration of a base set associative tag store.
48 #ifndef __MEM_CACHE_TAGS_BASE_SET_ASSOC_HH__
49 #define __MEM_CACHE_TAGS_BASE_SET_ASSOC_HH__
56 #include "mem/cache/base.hh"
57 #include "mem/cache/blk.hh"
58 #include "mem/cache/tags/base.hh"
59 #include "mem/cache/tags/cacheset.hh"
60 #include "mem/packet.hh"
61 #include "params/BaseSetAssoc.hh"
64 * A BaseSetAssoc cache tag store.
65 * @sa \ref gem5MemorySystem "gem5 Memory System"
67 * The BaseSetAssoc tags provide a base, as well as the functionality
68 * common to any set associative tags. Any derived class must implement
69 * the methods related to the specifics of the actual replacement policy.
72 * BlkType* accessBlock();
73 * BlkType* findVictim();
// Base set-associative tag store: owns the block/data storage and the
// set-indexed lookup structures shared by all set-associative variants.
// NOTE(review): this view of the header is missing lines (access
// specifiers, braces, and several member declarations such as assoc,
// allocAssoc, setShift, tagShift and setMask — only their doc comments
// survive). Confirm any change against the complete file.
77 class BaseSetAssoc : public BaseTags
80 /** Typedef the block type used in this tag store. */
81 typedef CacheBlk BlkType;
82 /** Typedef the set type used in this tag store. */
83 typedef CacheSet<CacheBlk> SetType;
87 /** The associativity of the cache. */
89 /** The allocatable associativity of the cache (alloc mask). */
92 /** The cache blocks. */
93 std::vector<BlkType> blks;
94 /** The data blocks, 1 per cache block. */
95 std::unique_ptr<uint8_t[]> dataBlks;
97 /** The number of sets in the cache. */
98 const unsigned numSets;
100 /** Whether tags and data are accessed sequentially. */
101 const bool sequentialAccess;
103 /** The cache sets. */
104 std::vector<SetType> sets;
106 /** The amount to shift the address to get the set. */
108 /** The amount to shift the address to get the tag. */
110 /** Mask out all bits that aren't part of the set index. */
115 /** Convenience typedef. */
116 typedef BaseSetAssocParams Params;
119 * Construct and initialize this tag store.
// @param p Parameter object carrying the tag store configuration.
121 BaseSetAssoc(const Params *p);
// Virtual destructor: this class is intended to be derived from.
126 virtual ~BaseSetAssoc() {};
129 * Find the cache block given set and way
130 * @param set The set of the block.
131 * @param way The way of the block.
132 * @return The cache block.
// Declared const: this lookup cannot modify the tag store.
134 CacheBlk *findBlockBySetAndWay(int set, int way) const override;
137 * Invalidate the given block.
138 * @param blk The block to invalidate.
140 void invalidate(CacheBlk *blk) override
// Only a currently-valid block may be invalidated.
143 assert(blk->isValid());
// Release the per-master occupancy accounting held by the old owner.
145 assert(blk->srcMasterId < cache->system->maxMasters());
146 occupancies[blk->srcMasterId]--;
// Clear ownership/task bookkeeping back to "no owner".
147 blk->srcMasterId = Request::invldMasterId;
148 blk->task_id = ContextSwitchTaskId::Unknown;
// NOTE(review): tickInserted is re-stamped on invalidation as well as on
// insertion — confirm the stats code expects this.
149 blk->tickInserted = curTick();
153 * Access block and update replacement data. May not succeed, in which case
154 * nullptr is returned. This has all the implications of a cache
155 * access and should only be used as such. Returns the access latency as a
157 * @param addr The address to find.
158 * @param is_secure True if the target memory space is secure.
159 * @param lat The access latency.
160 * @return Pointer to the cache block if found.
162 CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) override
// Decompose the address and search the selected set for a block whose
// tag and secure bit both match.
164 Addr tag = extractTag(addr);
165 int set = extractSet(addr);
166 BlkType *blk = sets[set].findBlk(tag, is_secure);
168 // Access all tags in parallel, hence one in each way. The data side
169 // either accesses all blocks in parallel, or one block sequentially on
170 // a hit. Sequential access with a miss doesn't access data.
171 tagAccesses += allocAssoc;
// NOTE(review): several statements and braces of this body are not
// visible in this view; the exact nesting of the stat accounting and the
// latency computation below cannot be fully checked here.
172 if (sequentialAccess) {
173 if (blk != nullptr) {
177 dataAccesses += allocAssoc;
180 if (blk != nullptr) {
183 // Check if the block to be accessed is available. If not,
184 // apply the accessLatency on top of block->whenReady.
185 if (blk->whenReady > curTick() &&
186 cache->ticksToCycles(blk->whenReady - curTick()) >
188 lat = cache->ticksToCycles(blk->whenReady - curTick()) +
201 * Finds the given address in the cache, do not update replacement data.
202 * i.e. This is a no-side-effect find of a block.
203 * @param addr The address to find.
204 * @param is_secure True if the target memory space is secure.
206 * @return Pointer to the cache block if found.
208 CacheBlk* findBlock(Addr addr, bool is_secure) const override;
211 * Find an invalid block to evict for the address provided.
212 * If there are no invalid blocks, this will return the block
213 * in the least-recently-used position.
214 * @param addr The addr to a find a replacement candidate for.
215 * @return The candidate block.
217 CacheBlk* findVictim(Addr addr) override
219 BlkType *blk = nullptr;
220 int set = extractSet(addr);
222 // prefer to evict an invalid block
// Only the ways currently allowed by the allocation limit (allocAssoc)
// are scanned, not the full associativity.
223 for (int i = 0; i < allocAssoc; ++i) {
224 blk = sets[set].blks[i];
// NOTE(review): the invalid-block test / early exit and the return
// statement of this body are not visible in this view.
233 * Insert the new block into the cache.
234 * @param pkt Packet holding the address to update
235 * @param blk The block to update.
237 void insertBlock(PacketPtr pkt, CacheBlk *blk) override
// Pull the address and the requesting master/task out of the packet.
239 Addr addr = pkt->getAddr();
240 MasterID master_id = pkt->req->masterId();
241 uint32_t task_id = pkt->req->taskId();
// First-touch bookkeeping: record warm-up completion once enough tags
// are in use.
243 if (!blk->isTouched) {
245 blk->isTouched = true;
246 if (!warmedUp && tagsInUse.value() >= warmupBound) {
248 warmupCycle = curTick();
252 // If we're replacing a block that was previously valid update
253 // stats for it. This can't be done in findBlock() because a
254 // found block might not actually be replaced there if the
255 // coherence protocol says it can't be.
256 if (blk->isValid()) {
258 totalRefs += blk->refCount;
262 // deal with evicted block
263 assert(blk->srcMasterId < cache->system->maxMasters());
264 occupancies[blk->srcMasterId]--;
269 blk->isTouched = true;
271 // Set tag for new block. Caller is responsible for setting status.
272 blk->tag = extractTag(addr);
274 // deal with what we are bringing in
275 assert(master_id < cache->system->maxMasters());
276 occupancies[master_id]++;
277 blk->srcMasterId = master_id;
278 blk->task_id = task_id;
279 blk->tickInserted = curTick();
281 // We only need to write into one tag and one data block.
287 * Limit the allocation for the cache ways.
288 * @param ways The maximum number of ways available for replacement.
290 virtual void setWayAllocationMax(int ways) override
// Reject nonsensical limits: at least one way must remain allocatable.
// NOTE(review): the assignment applying the limit is not visible here.
292 fatal_if(ways < 1, "Allocation limit must be greater than zero");
297 * Get the way allocation mask limit.
298 * @return The maximum number of ways available for replacement.
300 virtual int getWayAllocationMax() const override
306 * Generate the tag from the given address.
307 * @param addr The address to get the tag from.
308 * @return The tag of the address.
310 Addr extractTag(Addr addr) const override
// The tag is the high-order address bits above tagShift.
312 return (addr >> tagShift);
316 * Calculate the set index from the address.
317 * @param addr The address to get the set from.
318 * @return The set index of the address.
320 int extractSet(Addr addr) const override
// Shift down by setShift, then mask to keep only the set-index bits.
322 return ((addr >> setShift) & setMask);
326 * Regenerate the block address from the tag.
327 * @param tag The tag of the block.
328 * @param set The set of the block.
329 * @return The block address.
331 Addr regenerateBlkAddr(Addr tag, unsigned set) const override
// Inverse of extractTag/extractSet; bits below setShift come back zero.
333 return ((tag << tagShift) | ((Addr)set << setShift));
337 * Called at end of simulation to complete average block reference stats.
339 void cleanupRefs() override;
342 * Print all tags used.
344 std::string print() const override;
347 * Called prior to dumping stats to compute task occupancy
349 void computeStats() override;
352 * Visit each block in the tag store and apply a visitor to the
355 * The visitor should be a function (or object that behaves like a
356 * function) that takes a cache block reference as its parameter
357 * and returns a bool. A visitor can request the traversal to be
358 * stopped by returning false, returning true causes it to be
359 * called for the next block in the tag store.
361 * \param visitor Visitor to call on each block.
363 void forEachBlk(CacheBlkVisitor &visitor) override {
// Walk the flat block array (numSets * assoc entries), stopping early
// when the visitor returns false.
// NOTE(review): the loop's early-exit statement and closing braces are
// not visible in this view.
364 for (unsigned i = 0; i < numSets * assoc; ++i) {
365 if (!visitor(blks[i]))
371 #endif //__MEM_CACHE_TAGS_BASE_SET_ASSOC_HH__