/*
 * Copyright (c) 2012-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */
/**
 * @file
 * Describes a cache based on template policies.
 */
#ifndef __MEM_CACHE_CACHE_HH__
#define __MEM_CACHE_CACHE_HH__

#include <list>
#include <memory>
#include <unordered_set>

#include "base/misc.hh" // fatal, panic, and warn
#include "enums/Clusivity.hh"
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "params/Cache.hh"
#include "sim/eventq.hh"

// Forward declaration of the prefetcher used by the Cache below
class BasePrefetcher;
/**
 * A template-policy based cache. The behavior of the cache can be altered by
 * supplying different template policies. TagStore handles all tag and data
 * storage.
 * @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
 */
class Cache : public BaseCache
{
  public:

    /** A typedef for a list of CacheBlk pointers. */
    typedef std::list<CacheBlk*> BlkList;

  protected:
    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual bool recvTimingSnoopResp(PacketPtr pkt);

        virtual bool recvTimingReq(PacketPtr pkt);

        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual AddrRangeList getAddrRanges() const;

      public:

        CpuSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);
    };
    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {
      protected:

        Cache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(Cache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and do not only
         * consider the transmit list (used for responses), but also
         * any pending requests in the MSHR queue.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                return true;
            }
            return false;
        }
    };
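    /*
     * Illustrative sketch (not the actual cache.cc implementation) of
     * how a sendDeferredPacket override can honour checkConflictingSnoop;
     * the helper name nextReadyMSHR() is hypothetical:
     *
     *   void CacheReqPacketQueue::sendDeferredPacket()
     *   {
     *       // queued responses on the transmit list go out first
     *       if (deferredPacketReady()) {
     *           ReqPacketQueue::sendDeferredPacket();
     *       } else if (MSHR *mshr = nextReadyMSHR()) {
     *           // stall if a snoop response to the same block is
     *           // pending, otherwise turn the MSHR into a request
     *           if (!checkConflictingSnoop(mshr->blkAddr))
     *               cache.sendMSHRQueuePacket(mshr);
     *       }
     *   }
     */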
    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);
    };
    /** Tag and data Storage */
    BaseTags *tags;

    /** Prefetcher */
    BasePrefetcher *prefetcher;

    /** Temporary cache block for occasional transitory use */
    CacheBlk *tempBlock;
    /**
     * This cache should allocate a block on a line-sized write miss.
     */
    const bool doFastWrites;

    /**
     * Turn line-sized writes into WriteInvalidate transactions.
     */
    void promoteWholeLineWrites(PacketPtr pkt);

    /**
     * Notify the prefetcher on every access, not just misses.
     */
    const bool prefetchOnAccess;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;
    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;
    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call.
     */
    std::unique_ptr<Packet> pendingDelete;
    /**
     * Writebacks from the tempBlock, resulting on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;
    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }
    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventWrapper<Cache, &Cache::writebackTempBlockAtomic>
        writebackTempBlockAtomicEvent;
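    /*
     * Illustrative sketch of how the atomic response path hands off a
     * tempBlock writeback (close to, but not a copy of, the cache.cc
     * logic): the packet is stashed and the high-priority event is
     * scheduled at the current tick, so it fires right after recvAtomic
     * returns but before any subsequent atomic access:
     *
     *   assert(!tempBlockWriteback);      // at most one in flight
     *   tempBlockWriteback = writebackBlk(blk);
     *   schedule(writebackTempBlockAtomicEvent, curTick());
     */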
    /**
     * Store the outstanding requests that we are expecting snoop
     * responses from so we can determine which snoop responses we
     * generated and which ones were merely forwarded.
     */
    std::unordered_set<RequestPtr> outstandingSnoop;
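    /*
     * Illustrative sketch, assuming a simple membership test in
     * recvTimingSnoopResp: a response whose request is not found in
     * outstandingSnoop was merely forwarded and continues upward as a
     * snoop response, while one we generated is handled as a normal
     * response:
     *
     *   bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
     *       outstandingSnoop.end();
     */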
    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    bool access(PacketPtr pkt, CacheBlk *&blk,
                Cycles &lat, PacketList &writebacks);
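    /*
     * Illustrative call pattern, simplified from the timing path
     * (recvTimingReq); lookupLatency and clockEdge are assumed to be
     * inherited from BaseCache/ClockedObject:
     *
     *   CacheBlk *blk = nullptr;
     *   Cycles lat = lookupLatency;
     *   PacketList writebacks;
     *   bool satisfied = access(pkt, blk, lat, writebacks);
     *   // hit: respond upstream; miss: allocate an MSHR entry.
     *   // Either way, drain any evictions the lookup produced:
     *   doWritebacks(writebacks, clockEdge(lat));
     */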
    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
    /**
     * Find a block frame for new block at address addr targeting the
     * given security space, assuming that the block is not currently
     * in the cache. Append writebacks if any to provided packet
     * list. Return free block frame. May return nullptr if there are
     * no replaceable blocks at the moment.
     */
    CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
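    /*
     * Illustrative sketch of the eviction decision inside
     * allocateBlock, assuming the tags->findVictim interface: a valid
     * victim is written back if dirty (or if writebackClean is set),
     * and otherwise leaves as a CleanEvict:
     *
     *   CacheBlk *blk = tags->findVictim(addr);
     *   if (blk && blk->isValid()) {
     *       if (blk->isDirty() || writebackClean)
     *           writebacks.push_back(writebackBlk(blk));
     *       else
     *           writebacks.push_back(cleanEvictBlk(blk));
     *   }
     *   return blk;
     */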
    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);
    /**
     * Maintain the clusivity of this cache by potentially
     * invalidating a block. This method works in conjunction with
     * satisfyRequest, but is separate to allow us to handle all MSHR
     * targets before potentially dropping a block.
     *
     * @param from_cache Whether we have dealt with a packet from a cache
     * @param blk The block that should potentially be dropped
     */
    void maintainClusivity(bool from_cache, CacheBlk *blk);
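    /*
     * Illustrative sketch of the clusivity upkeep, assuming it matches
     * the policy described above: a mostly-exclusive cache drops a
     * clean block once an upstream cache has taken a copy:
     *
     *   if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
     *       clusivity == Enums::mostly_excl)
     *       invalidateBlock(blk);
     */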
    /**
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);
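    /*
     * Illustrative fill-path sketch, assuming the structure of
     * handleFill: reuse the existing block if there is one, otherwise
     * try to allocate, and fall back to tempBlock when no victim is
     * available:
     *
     *   if (!blk)
     *       blk = allocate ? allocateBlock(addr, is_secure, writebacks)
     *                      : nullptr;
     *   if (!blk) {
     *       // no replaceable frame; use the transient tempBlock
     *       blk = tempBlock;
     *       tempBlock->set = tags->extractSet(addr);
     *       tempBlock->tag = tags->extractTag(addr);
     *   }
     */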
    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill only if the packet did not come from a cache: thus if we
     * are dealing with a whole-line write (which behaves much like a
     * writeback), if the original target packet came from a
     * non-caching source, or if we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const override
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }
    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The result of the access.
     */
    bool recvTimingReq(PacketPtr pkt);

    /**
     * Insert writebacks into the write buffer.
     */
    void doWritebacks(PacketList& writebacks, Tick forward_time);

    /**
     * Send writebacks down the memory hierarchy in atomic mode.
     */
    void doWritebacksAtomic(PacketList& writebacks);
    /**
     * Handling the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    void recvTimingSnoopReq(PacketPtr pkt);

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    void recvTimingSnoopResp(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    Tick recvAtomicSnoop(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @param fromCpuSide from the CPU side port or the memory side port
     */
    void functionalAccess(PacketPtr pkt, bool fromCpuSide);
    /**
     * Perform any necessary updates to the block and perform any data
     * exchange between the packet and the block. The flags of the
     * packet are also set accordingly.
     *
     * @param pkt Request packet from upstream that hit a block
     * @param blk Cache block that the packet hit
     * @param deferred_response Whether this hit is to a block that
     *                          originally missed
     * @param pending_downgrade Whether the writable flag is to be removed
     */
    void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                        bool deferred_response = false,
                        bool pending_downgrade = false);
    void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                                bool already_copied, bool pending_inval);
    /**
     * Perform an upward snoop if needed, and update the block state
     * (possibly invalidating the block). Also create a response if required.
     *
     * @param pkt Snoop packet
     * @param blk Cache block being snooped
     * @param is_timing Timing or atomic for the response
     * @param is_deferred Is this a deferred snoop or not?
     * @param pending_inval Do we have a pending invalidation?
     *
     * @return The snoop delay incurred by the upwards snoop
     */
    uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk,
                         bool is_timing, bool is_deferred, bool pending_inval);
    /**
     * Create a writeback request for the given block.
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a CleanEvict request for the given block.
     * @param blk The block to evict.
     * @return The CleanEvict request for the block.
     */
    PacketPtr cleanEvictBlk(CacheBlk *blk);
    void memWriteback() override;
    void memInvalidate() override;
    bool isDirty() const override;
    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     *
     * \return Always returns true.
     */
    bool writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     *
     * \return Always returns true.
     */
    bool invalidateVisitor(CacheBlk &blk);
    /**
     * Create an appropriate downstream bus request packet for the
     * given parameters.
     * @param cpu_pkt The miss that needs to be satisfied.
     * @param blk The block currently in the cache corresponding to
     *        cpu_pkt (nullptr if none).
     * @param needsWritable Indicates that the block must be writable
     *        even if the request in cpu_pkt doesn't indicate that.
     * @return A new Packet containing the request, or nullptr if the
     *         current request in cpu_pkt should just be forwarded on.
     */
    PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                               bool needsWritable) const;
    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();
    /**
     * Send up a snoop request and find cached copies. If cached copies are
     * found, set the BLOCK_CACHED flag in pkt.
     */
    bool isCachedAbove(PacketPtr pkt, bool is_timing = true) const;
    /**
     * Return whether there are any outstanding misses.
     */
    bool outstandingMisses() const
    {
        return !mshrQueue.isEmpty();
    }

    CacheBlk *findBlock(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool inCache(Addr addr, bool is_secure) const override {
        return (tags->findBlock(addr, is_secure) != nullptr);
    }

    bool inMissQueue(Addr addr, bool is_secure) const override {
        return (mshrQueue.findMatch(addr, is_secure) != nullptr);
    }
    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

  public:

    /** Instantiates a basic cache object. */
    Cache(const CacheParams *p);

    /** Non-default destructor is needed to deallocate memory. */
    virtual ~Cache();

    void regStats() override;
    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet, and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
    /**
     * Serialize the state of the caches.
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;

  protected:
    /**
     * Wrap a method and present it as a cache block visitor.
     *
     * For example the forEachBlk method in the tag arrays expects a
     * callable object/function as its parameter. This class wraps a
     * method in an object and presents a callable object that adheres
     * to the cache block visitor protocol.
     */
    class CacheBlkVisitorWrapper : public CacheBlkVisitor
    {
      public:
        typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);

        CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
            : cache(_cache), visitor(_visitor) {}

        bool operator()(CacheBlk &blk) override {
            return (cache.*visitor)(blk);
        }

      private:
        Cache &cache;
        VisitorPtr visitor;
    };
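    /*
     * Illustrative use (this mirrors how memWriteback can be
     * implemented in terms of the wrapper): wrap a member function and
     * hand it to the tag array's forEachBlk:
     *
     *   CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
     *   tags->forEachBlk(visitor);
     */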
    /**
     * Cache block visitor that determines if there are dirty blocks in a
     * cache.
     *
     * Use with the forEachBlk method in the tag array to determine if the
     * array contains dirty blocks.
     */
    class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
    {
      public:
        CacheBlkIsDirtyVisitor()
            : _isDirty(false) {}

        bool operator()(CacheBlk &blk) override {
            if (blk.isDirty()) {
                // remember the hit and stop iterating over the array
                _isDirty = true;
                return false;
            } else {
                return true;
            }
        }

        /**
         * Does the array contain a dirty line?
         *
         * \return true if yes, false otherwise.
         */
        bool isDirty() const { return _isDirty; }

      private:
        bool _isDirty;
    };
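    /*
     * Illustrative use (this mirrors how Cache::isDirty can be
     * implemented): sweep the tag array and query the visitor afterwards:
     *
     *   CacheBlkIsDirtyVisitor visitor;
     *   tags->forEachBlk(visitor);
     *   return visitor.isDirty();
     */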
};

#endif // __MEM_CACHE_CACHE_HH__