/*
 * Copyright (c) 2012-2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Describes a cache based on template policies.
 */

#ifndef __MEM_CACHE_CACHE_HH__
#define __MEM_CACHE_CACHE_HH__

#include <unordered_set>

#include "base/misc.hh" // fatal, panic, and warn
#include "enums/Clusivity.hh"
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "params/Cache.hh"
#include "sim/eventq.hh"

// Forward declaration of the prefetcher, used by the Cache class below.
class BasePrefetcher;

/**
 * A template-policy based cache. The behavior of the cache can be altered by
 * supplying different template policies. TagStore handles all tag and data
 * storage. @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
 */
class Cache : public BaseCache
{
  public:

    /** A typedef for a list of CacheBlk pointers. */
    typedef std::list<CacheBlk*> BlkList;

  protected:

    /**
     * The CPU-side port extends the base cache slave port with access
     * functions for functional, atomic and timing requests.
     */
    class CpuSidePort : public CacheSlavePort
    {
      private:

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual bool recvTimingSnoopResp(PacketPtr pkt);

        virtual bool recvTimingReq(PacketPtr pkt);

        virtual Tick recvAtomic(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt);

        virtual AddrRangeList getAddrRanges() const;

      public:

        CpuSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);
    };

    /**
     * Override the default behaviour of sendDeferredPacket to enable
     * the memory-side cache port to also send requests based on the
     * current MSHR status. This queue has a pointer to our specific
     * cache implementation and is used by the MemSidePort.
     */
    class CacheReqPacketQueue : public ReqPacketQueue
    {
      protected:

        Cache &cache;
        SnoopRespPacketQueue &snoopRespQueue;

      public:

        CacheReqPacketQueue(Cache &cache, MasterPort &port,
                            SnoopRespPacketQueue &snoop_resp_queue,
                            const std::string &label) :
            ReqPacketQueue(cache, port, label), cache(cache),
            snoopRespQueue(snoop_resp_queue) { }

        /**
         * Override the normal sendDeferredPacket and do not only
         * consider the transmit list (used for responses), but also
         * the requests.
         */
        virtual void sendDeferredPacket();

        /**
         * Check if there is a conflicting snoop response about to be
         * sent out, and if so simply stall any requests, and schedule
         * a send event at the same time as the next snoop response is
         * being sent out.
         */
        bool checkConflictingSnoop(Addr addr)
        {
            if (snoopRespQueue.hasAddr(addr)) {
                DPRINTF(CachePort, "Waiting for snoop response to be "
                        "sent\n");
                Tick when = snoopRespQueue.deferredPacketReadyTime();
                schedSendEvent(when);
                // stall the request until the snoop response has left
                return true;
            }
            return false;
        }
    };
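
    // A minimal usage sketch (an assumption, not code from this
    // header): the overridden sendDeferredPacket is expected to
    // consult checkConflictingSnoop before issuing a request for a
    // block address, e.g.:
    //
    //     if (!checkConflictingSnoop(entry->blkAddr)) {
    //         // no queued snoop response for this block, safe to send
    //         entry->sendPacket(cache);
    //     }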

    /**
     * The memory-side port extends the base cache master port with
     * access functions for functional, atomic and timing snoops.
     */
    class MemSidePort : public CacheMasterPort
    {
      private:

        /** The cache-specific queue. */
        CacheReqPacketQueue _reqQueue;

        /** The cache-specific snoop response queue. */
        SnoopRespPacketQueue _snoopRespQueue;

        // a pointer to our specific cache implementation
        Cache *cache;

      protected:

        virtual void recvTimingSnoopReq(PacketPtr pkt);

        virtual bool recvTimingResp(PacketPtr pkt);

        virtual Tick recvAtomicSnoop(PacketPtr pkt);

        virtual void recvFunctionalSnoop(PacketPtr pkt);

      public:

        MemSidePort(const std::string &_name, Cache *_cache,
                    const std::string &_label);
    };

    /** Tag and data storage. */
    BaseTags *tags;

    /** Prefetcher associated with this cache. */
    BasePrefetcher *prefetcher;

    /** Temporary cache block for occasional transitory use. */
    CacheBlk *tempBlock;

    /**
     * This cache should allocate a block on a line-sized write miss.
     */
    const bool doFastWrites;

    /**
     * Turn line-sized writes into WriteInvalidate transactions.
     */
    void promoteWholeLineWrites(PacketPtr pkt);

    /**
     * Notify the prefetcher on every access, not just misses.
     */
    const bool prefetchOnAccess;

    /**
     * Clusivity with respect to the upstream cache, determining if we
     * fill into both this cache and the cache above on a miss. Note
     * that we currently do not support strict clusivity policies.
     */
    const Enums::Clusivity clusivity;

    /**
     * Determine if clean lines should be written back or not. In
     * cases where a downstream cache is mostly inclusive we likely
     * want it to act as a victim cache also for lines that have not
     * been modified. Hence, we cannot simply drop the line (or send a
     * clean evict), but rather need to send the actual data.
     */
    const bool writebackClean;

    /**
     * Upstream caches need this packet until true is returned, so
     * hold it for deletion until a subsequent call.
     */
    std::unique_ptr<Packet> pendingDelete;

    /**
     * Writebacks from the tempBlock, resulting on the response path
     * in atomic mode, must happen after the call to recvAtomic has
     * finished (for the right ordering of the packets). We therefore
     * need to hold on to the packets, and have a method and an event
     * to send them.
     */
    PacketPtr tempBlockWriteback;

    /**
     * Send the outstanding tempBlock writeback. To be called after
     * recvAtomic finishes in cases where the block we filled is in
     * fact the tempBlock, and now needs to be written back.
     */
    void writebackTempBlockAtomic() {
        assert(tempBlockWriteback != nullptr);
        PacketList writebacks{tempBlockWriteback};
        doWritebacksAtomic(writebacks);
        tempBlockWriteback = nullptr;
    }

    /**
     * An event to writeback the tempBlock after recvAtomic
     * finishes. To avoid other calls to recvAtomic getting in
     * between, we create this event with a higher priority.
     */
    EventWrapper<Cache, &Cache::writebackTempBlockAtomic>
        writebackTempBlockAtomicEvent;
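
    // A minimal usage sketch (an assumption based on the comments
    // above, not code from this header): at the end of recvAtomic,
    // when the fill landed in the tempBlock, the writeback packet is
    // stashed and the high-priority event scheduled, so the writeback
    // happens right after recvAtomic returns:
    //
    //     tempBlockWriteback = writebackBlk(blk);
    //     schedule(writebackTempBlockAtomicEvent, curTick());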

    /**
     * Store the outstanding requests that we are expecting snoop
     * responses from so we can determine which snoop responses we
     * generated and which ones were merely forwarded.
     */
    std::unordered_set<RequestPtr> outstandingSnoop;

    /**
     * Does all the processing necessary to perform the provided request.
     * @param pkt The memory request to perform.
     * @param blk The cache block to be updated.
     * @param lat The latency of the access.
     * @param writebacks List for any writebacks that need to be performed.
     * @return Boolean indicating whether the request was satisfied.
     */
    bool access(PacketPtr pkt, CacheBlk *&blk,
                Cycles &lat, PacketList &writebacks);

    /**
     * Handle doing the Compare and Swap function for SPARC.
     */
    void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);

    /**
     * Find a block frame for a new block at address addr targeting the
     * given security space, assuming that the block is not currently
     * in the cache. Append writebacks, if any, to the provided packet
     * list. Return the free block frame. May return nullptr if there are
     * no replaceable blocks at the moment.
     */
    CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);

    /**
     * Invalidate a cache block.
     *
     * @param blk Block to invalidate
     */
    void invalidateBlock(CacheBlk *blk);

    /**
     * Populates a cache block and handles all outstanding requests for the
     * satisfied fill request. This version takes two memory requests. One
     * contains the fill data, the other is an optional target to satisfy.
     * @param pkt The memory request with the fill data.
     * @param blk The cache block if it already exists.
     * @param writebacks List for any writebacks that need to be performed.
     * @param allocate Whether to allocate a block or use the temp block.
     * @return Pointer to the new cache block.
     */
    CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
                         PacketList &writebacks, bool allocate);

    /**
     * Determine whether we should allocate on a fill or not. If this
     * cache is mostly inclusive with regards to the upstream cache(s)
     * we always allocate (for any non-forwarded and cacheable
     * requests). In the case of a mostly exclusive cache, we allocate
     * on fill only if the packet did not come from a cache: that is, if
     * we are dealing with a whole-line write (which behaves much like
     * a writeback), the original target packet came from a non-caching
     * source, or we are performing a prefetch or LLSC.
     *
     * @param cmd Command of the incoming requesting packet
     * @return Whether we should allocate on the fill
     */
    inline bool allocOnFill(MemCmd cmd) const override
    {
        return clusivity == Enums::mostly_incl ||
            cmd == MemCmd::WriteLineReq ||
            cmd == MemCmd::ReadReq ||
            cmd == MemCmd::WriteReq ||
            cmd.isPrefetch() ||
            cmd.isLLSC();
    }
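
    // Illustrative example (not from this header): with clusivity ==
    // mostly_excl, a ReadSharedReq forwarded from an upstream cache
    // misses here, allocOnFill returns false, and handleFill places
    // the response data in the tempBlock instead of allocating, so
    // the line effectively lives only in the cache above.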

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The result of the access.
     */
    bool recvTimingReq(PacketPtr pkt);

    /**
     * Insert writebacks into the write buffer.
     */
    void doWritebacks(PacketList& writebacks, Tick forward_time);

    /**
     * Send writebacks down the memory hierarchy in atomic mode.
     */
    void doWritebacksAtomic(PacketList& writebacks);

    /**
     * Handle the special case of uncacheable write responses to
     * make recvTimingResp less cluttered.
     */
    void handleUncacheableWriteResp(PacketPtr pkt);

    /**
     * Handles a response (cache line fill/write ack) from the bus.
     * @param pkt The response packet
     */
    void recvTimingResp(PacketPtr pkt);

    /**
     * Snoops bus transactions to maintain coherence.
     * @param pkt The current bus transaction.
     */
    void recvTimingSnoopReq(PacketPtr pkt);

    /**
     * Handle a snoop response.
     * @param pkt Snoop response packet
     */
    void recvTimingSnoopResp(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @return The number of ticks required for the access.
     */
    Tick recvAtomic(PacketPtr pkt);

    /**
     * Snoop for the provided request in the cache and return the estimated
     * time taken.
     * @param pkt The memory request to snoop
     * @return The number of ticks required for the snoop.
     */
    Tick recvAtomicSnoop(PacketPtr pkt);

    /**
     * Performs the access specified by the request.
     * @param pkt The request to perform.
     * @param fromCpuSide from the CPU side port or the memory side port
     */
    void functionalAccess(PacketPtr pkt, bool fromCpuSide);

    /** Satisfy a CPU-side request using the data in the given block. */
    void satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
                               bool deferred_response = false,
                               bool pending_downgrade = false);

    /** Service the targets of an MSHR using the given block. */
    bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, CacheBlk *blk);

    /** Turn a snooped request into a timing response supplying data. */
    void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                                bool already_copied, bool pending_inval);

    /**
     * Perform an upward snoop if needed, and update the block state
     * (possibly invalidating the block). Also create a response if required.
     *
     * @param pkt Snoop packet
     * @param blk Cache block being snooped
     * @param is_timing Timing or atomic for the response
     * @param is_deferred Is this a deferred snoop or not?
     * @param pending_inval Do we have a pending invalidation?
     *
     * @return The snoop delay incurred by the upwards snoop
     */
    uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk,
                         bool is_timing, bool is_deferred, bool pending_inval);

    /**
     * Create a writeback request for the given block.
     * @param blk The block to writeback.
     * @return The writeback request for the block.
     */
    PacketPtr writebackBlk(CacheBlk *blk);

    /**
     * Create a CleanEvict request for the given block.
     * @param blk The block to evict.
     * @return The CleanEvict request for the block.
     */
    PacketPtr cleanEvictBlk(CacheBlk *blk);

    void memWriteback() override;
    void memInvalidate() override;
    bool isDirty() const override;

    /**
     * Cache block visitor that writes back dirty cache blocks using
     * functional writes.
     *
     * \return Always returns true.
     */
    bool writebackVisitor(CacheBlk &blk);

    /**
     * Cache block visitor that invalidates all blocks in the cache.
     *
     * @warn Dirty cache lines will not be written back to memory.
     *
     * \return Always returns true.
     */
    bool invalidateVisitor(CacheBlk &blk);

    /**
     * Create an appropriate downstream bus request packet for the
     * given parameters.
     * @param cpu_pkt The miss that needs to be satisfied.
     * @param blk The block currently in the cache corresponding to
     *        cpu_pkt (nullptr if none).
     * @param needsWritable Indicates that the block must be writable
     *        even if the request in cpu_pkt doesn't indicate that.
     * @return A new Packet containing the request, or nullptr if the
     *         current request in cpu_pkt should just be forwarded on.
     */
    PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                               bool needsWritable) const;

    /**
     * Return the next queue entry to service, either a pending miss
     * from the MSHR queue, a buffered write from the write buffer, or
     * something from the prefetcher. This function is responsible
     * for prioritizing among those sources on the fly.
     */
    QueueEntry* getNextQueueEntry();

    /**
     * Send up a snoop request and find cached copies. If cached copies are
     * found, set the BLOCK_CACHED flag in pkt.
     */
    bool isCachedAbove(PacketPtr pkt, bool is_timing = true) const;

    /**
     * Return whether there are any outstanding misses.
     */
    bool outstandingMisses() const
    {
        return !mshrQueue.isEmpty();
    }

    CacheBlk *findBlock(Addr addr, bool is_secure) const {
        return tags->findBlock(addr, is_secure);
    }

    bool inCache(Addr addr, bool is_secure) const override {
        return (tags->findBlock(addr, is_secure) != nullptr);
    }

    bool inMissQueue(Addr addr, bool is_secure) const override {
        return (mshrQueue.findMatch(addr, is_secure) != nullptr);
    }

    /**
     * Find next request ready time from among possible sources.
     */
    Tick nextQueueReadyTime() const;

  public:
    /** Instantiates a basic cache object. */
    Cache(const CacheParams *p);

    /** Non-default destructor is needed to deallocate memory. */
    virtual ~Cache();

    void regStats() override;

    /**
     * Take an MSHR, turn it into a suitable downstream packet, and
     * send it out. This construct allows a queue entry to choose a suitable
     * approach based on its type.
     *
     * @param mshr The MSHR to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendMSHRQueuePacket(MSHR* mshr);

    /**
     * Similar to sendMSHRQueuePacket, but for a write-queue entry
     * instead. Create the packet and send it, and if successful also
     * mark the entry in service.
     *
     * @param wq_entry The write-queue entry to turn into a packet and send
     * @return True if the port is waiting for a retry
     */
    bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);

    /**
     * Serialize the state of the caches.
     *
     * We currently don't support checkpointing cache state, so this panics.
     */
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;
};

/**
 * Wrap a method and present it as a cache block visitor.
 *
 * For example, the forEachBlk method in the tag arrays expects a
 * callable object/function as its parameter. This class wraps a
 * method in an object and presents a callable object that adheres to
 * the cache block visitor protocol.
 */
class CacheBlkVisitorWrapper : public CacheBlkVisitor
{
  public:
    typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);

    CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
        : cache(_cache), visitor(_visitor) {}

    bool operator()(CacheBlk &blk) override {
        return (cache.*visitor)(blk);
    }

  private:
    Cache &cache;
    VisitorPtr visitor;
};
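
/*
 * Example usage (a sketch, assuming it is invoked from a Cache member
 * function so that *this is a Cache, and that `tags` implements
 * forEachBlk): apply writebackVisitor to every block in the tag array.
 *
 *     CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
 *     tags->forEachBlk(visitor);
 */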

/**
 * Cache block visitor that determines if there are dirty blocks in a
 * cache.
 *
 * Use with the forEachBlk method in the tag array to determine if the
 * array contains dirty blocks.
 */
class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
{
  public:
    CacheBlkIsDirtyVisitor()
        : _isDirty(false) {}

    bool operator()(CacheBlk &blk) override {
        if (blk.isDirty()) {
            _isDirty = true;
            // a dirty block was found, no need to keep iterating
            return false;
        } else {
            return true;
        }
    }

    /**
     * Does the array contain a dirty line?
     *
     * \return true if yes, false otherwise.
     */
    bool isDirty() const { return _isDirty; }

  private:
    bool _isDirty;
};
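
/*
 * Example usage (a sketch, along the same lines as the wrapper above):
 * check whether any block in the tag array is dirty.
 *
 *     CacheBlkIsDirtyVisitor visitor;
 *     tags->forEachBlk(visitor);
 *     return visitor.isDirty();
 */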

#endif // __MEM_CACHE_CACHE_HH__