/*
 * Copyright (c) 2012-2014 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */
/**
 * @file
 * Describes a cache based on template policies.
 */
#ifndef __CACHE_HH__
#define __CACHE_HH__

#include <cstdint>
#include <iosfwd>
#include <string>
#include <vector>

#include "base/misc.hh" // fatal, panic, and warn
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "sim/eventq.hh"
65 * A template-policy based cache. The behavior of the cache can be altered by
66 * supplying different template policies. TagStore handles all tag and data
67 * storage @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
69 template <class TagStore>
70 class Cache : public BaseCache
73 /** Define the type of cache block to use. */
74 typedef typename TagStore::BlkType BlkType;
75 /** A typedef for a list of BlkType pointers. */
76 typedef typename TagStore::BlkList BlkList;
79 typedef CacheBlkVisitorWrapper<Cache<TagStore>, BlkType> WrappedBlkVisitor;
82 * The CPU-side port extends the base cache slave port with access
83 * functions for functional, atomic and timing requests.
85 class CpuSidePort : public CacheSlavePort
89 // a pointer to our specific cache implementation
90 Cache<TagStore> *cache;
94 virtual bool recvTimingSnoopResp(PacketPtr pkt);
96 virtual bool recvTimingReq(PacketPtr pkt);
98 virtual Tick recvAtomic(PacketPtr pkt);
100 virtual void recvFunctional(PacketPtr pkt);
102 virtual AddrRangeList getAddrRanges() const;
106 CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
107 const std::string &_label);
112 * Override the default behaviour of sendDeferredPacket to enable
113 * the memory-side cache port to also send requests based on the
114 * current MSHR status. This queue has a pointer to our specific
115 * cache implementation and is used by the MemSidePort.
117 class MemSidePacketQueue : public MasterPacketQueue
122 Cache<TagStore> &cache;
126 MemSidePacketQueue(Cache<TagStore> &cache, MasterPort &port,
127 const std::string &label) :
128 MasterPacketQueue(cache, port, label), cache(cache) { }
131 * Override the normal sendDeferredPacket and do not only
132 * consider the transmit list (used for responses), but also
135 virtual void sendDeferredPacket();
140 * The memory-side port extends the base cache master port with
141 * access functions for functional, atomic and timing snoops.
143 class MemSidePort : public CacheMasterPort
147 /** The cache-specific queue. */
148 MemSidePacketQueue _queue;
150 // a pointer to our specific cache implementation
151 Cache<TagStore> *cache;
155 virtual void recvTimingSnoopReq(PacketPtr pkt);
157 virtual bool recvTimingResp(PacketPtr pkt);
159 virtual Tick recvAtomicSnoop(PacketPtr pkt);
161 virtual void recvFunctionalSnoop(PacketPtr pkt);
165 MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
166 const std::string &_label);
169 /** Tag and data Storage */
173 BasePrefetcher *prefetcher;
175 /** Temporary cache block for occasional transitory use */
179 * This cache should allocate a block on a line-sized write miss.
181 const bool doFastWrites;
184 * Turn line-sized writes into WriteInvalidate transactions.
186 void promoteWholeLineWrites(PacketPtr pkt);
189 * Notify the prefetcher on every access, not just misses.
191 const bool prefetchOnAccess;
194 * @todo this is a temporary workaround until the 4-phase code is committed.
195 * upstream caches need this packet until true is returned, so hold it for
196 * deletion until a subsequent call
198 std::vector<PacketPtr> pendingDelete;
201 * Does all the processing necessary to perform the provided request.
202 * @param pkt The memory request to perform.
203 * @param blk The cache block to be updated.
204 * @param lat The latency of the access.
205 * @param writebacks List for any writebacks that need to be performed.
206 * @return Boolean indicating whether the request was satisfied.
208 bool access(PacketPtr pkt, BlkType *&blk,
209 Cycles &lat, PacketList &writebacks);
212 *Handle doing the Compare and Swap function for SPARC.
214 void cmpAndSwap(BlkType *blk, PacketPtr pkt);
217 * Find a block frame for new block at address addr targeting the
218 * given security space, assuming that the block is not currently
219 * in the cache. Append writebacks if any to provided packet
220 * list. Return free block frame. May return NULL if there are
221 * no replaceable blocks at the moment.
223 BlkType *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
226 * Populates a cache block and handles all outstanding requests for the
227 * satisfied fill request. This version takes two memory requests. One
228 * contains the fill data, the other is an optional target to satisfy.
229 * @param pkt The memory request with the fill data.
230 * @param blk The cache block if it already exists.
231 * @param writebacks List for any writebacks that need to be performed.
232 * @return Pointer to the new cache block.
234 BlkType *handleFill(PacketPtr pkt, BlkType *blk,
235 PacketList &writebacks);
239 * Performs the access specified by the request.
240 * @param pkt The request to perform.
241 * @return The result of the access.
243 bool recvTimingReq(PacketPtr pkt);
246 * Handles a response (cache line fill/write ack) from the bus.
247 * @param pkt The response packet
249 void recvTimingResp(PacketPtr pkt);
252 * Snoops bus transactions to maintain coherence.
253 * @param pkt The current bus transaction.
255 void recvTimingSnoopReq(PacketPtr pkt);
258 * Handle a snoop response.
259 * @param pkt Snoop response packet
261 void recvTimingSnoopResp(PacketPtr pkt);
264 * Performs the access specified by the request.
265 * @param pkt The request to perform.
266 * @return The number of ticks required for the access.
268 Tick recvAtomic(PacketPtr pkt);
271 * Snoop for the provided request in the cache and return the estimated
273 * @param pkt The memory request to snoop
274 * @return The number of ticks required for the snoop.
276 Tick recvAtomicSnoop(PacketPtr pkt);
279 * Performs the access specified by the request.
280 * @param pkt The request to perform.
281 * @param fromCpuSide from the CPU side port or the memory side port
283 void functionalAccess(PacketPtr pkt, bool fromCpuSide);
285 void satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
286 bool deferred_response = false,
287 bool pending_downgrade = false);
288 bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, BlkType *blk);
290 void doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
291 bool already_copied, bool pending_inval);
294 * Sets the blk to the new state.
295 * @param blk The cache block being snooped.
296 * @param new_state The new coherence state for the block.
298 void handleSnoop(PacketPtr ptk, BlkType *blk,
299 bool is_timing, bool is_deferred, bool pending_inval);
302 * Create a writeback request for the given block.
303 * @param blk The block to writeback.
304 * @return The writeback request for the block.
306 PacketPtr writebackBlk(BlkType *blk);
310 void memInvalidate();
311 bool isDirty() const;
314 * Cache block visitor that writes back dirty cache blocks using
317 * \return Always returns true.
319 bool writebackVisitor(BlkType &blk);
321 * Cache block visitor that invalidates all blocks in the cache.
323 * @warn Dirty cache lines will not be written back to memory.
325 * \return Always returns true.
327 bool invalidateVisitor(BlkType &blk);
330 * Flush a cache line due to an uncacheable memory access to the
333 * @note This shouldn't normally happen, but we need to handle it
334 * since some architecture models don't implement cache
335 * maintenance operations. We won't even try to get a decent
336 * timing here since the line should have been flushed earlier by
337 * a cache maintenance operation.
339 void uncacheableFlush(PacketPtr pkt);
342 * Squash all requests associated with specified thread.
343 * intended for use by I-cache.
344 * @param threadNum The thread to squash.
346 void squash(int threadNum);
349 * Generate an appropriate downstream bus request packet for the
351 * @param cpu_pkt The upstream request that needs to be satisfied.
352 * @param blk The block currently in the cache corresponding to
353 * cpu_pkt (NULL if none).
354 * @param needsExclusive Indicates that an exclusive copy is required
355 * even if the request in cpu_pkt doesn't indicate that.
356 * @return A new Packet containing the request, or NULL if the
357 * current request in cpu_pkt should just be forwarded on.
359 PacketPtr getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
360 bool needsExclusive) const;
363 * Return the next MSHR to service, either a pending miss from the
364 * mshrQueue, a buffered write from the write buffer, or something
365 * from the prefetcher. This function is responsible for
366 * prioritizing among those sources on the fly.
371 * Selects an outstanding request to service. Called when the
372 * cache gets granted the downstream bus in timing mode.
373 * @return The request to service, NULL if none found.
375 PacketPtr getTimingPacket();
378 * Marks a request as in service (sent on the bus). This can have side
379 * effect since storage for no response commands is deallocated once they
380 * are successfully sent.
381 * @param pkt The request that was sent on the bus.
383 void markInService(MSHR *mshr, PacketPtr pkt = 0);
386 * Return whether there are any outstanding misses.
388 bool outstandingMisses() const
390 return mshrQueue.allocated != 0;
393 CacheBlk *findBlock(Addr addr, bool is_secure) const {
394 return tags->findBlock(addr, is_secure);
397 bool inCache(Addr addr, bool is_secure) const {
398 return (tags->findBlock(addr, is_secure) != 0);
401 bool inMissQueue(Addr addr, bool is_secure) const {
402 return (mshrQueue.findMatch(addr, is_secure) != 0);
406 * Find next request ready time from among possible sources.
408 Tick nextMSHRReadyTime() const;
411 /** Instantiates a basic cache object. */
412 Cache(const Params *p);
414 /** Non-default destructor is needed to deallocate memory. */
419 /** serialize the state of the caches
420 * We currently don't support checkpointing cache state, so this panics.
422 virtual void serialize(std::ostream &os);
423 void unserialize(Checkpoint *cp, const std::string §ion);
426 #endif // __CACHE_HH__