/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Cache definitions.
 */
#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"
Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}
void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the modified state)
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                // - we have a writable copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have set the hasSharers flag when
                //   snooping the packet)
                // - the read has not explicitly asked for a clean
                //   copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}
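// A minimal sketch of the read-response policy above (editor's
// illustration, kept out of the build via #if 0; the struct and helper
// names below are invented and are not gem5 API):
#if 0
struct RespFlags { bool cache_responding; bool has_sharers; };

static RespFlags
sketchReadResponse(bool dirty, bool writable, bool pending_downgrade,
                   bool other_sharers, bool wants_clean, bool deferred)
{
    if (writable && !pending_downgrade && !other_sharers && !wants_clean) {
        if (!dirty)
            return {false, false};            // pass the line up Exclusive
        // owner: hand over Modified, unless the response was deferred
        // past snoops we may have answered incorrectly
        return deferred ? RespFlags{false, true}    // shared line instead
                        : RespFlags{true, false};   // pass up Modified
    }
    return {false, true};                     // only a shared copy
}
#endif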
/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            BaseCache::evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}
void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // the write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require a
                // response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}
void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set BLOCK_CACHED flag in Writebacks
        // and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. Atomic mode isCachedAbove
                // modifies the packet to set the BLOCK_CACHED flag.
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}
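// A condensed sketch of the routing decision both loops above implement
// (editor's illustration, kept out of the build via #if 0; WbAction and
// the helper name are invented):
#if 0
enum class WbAction { Drop, SendBelowMarkedCached, SendBelow };

static WbAction
sketchRouteEviction(MemCmd cmd, bool cached_above)
{
    if (!cached_above)
        return WbAction::SendBelow;    // also clears the snoop filter bit
    if (cmd == MemCmd::CleanEvict || cmd == MemCmd::WritebackClean)
        return WbAction::Drop;         // an upper copy keeps the line alive
    // WritebackDirty / WriteClean: the data must still reach memory, but
    // BLOCK_CACHED keeps the snoop filter bit set for this address
    return WbAction::SendBelowMarkedCached;
}
#endif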
void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it along as a snoop response
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay the delay that occurs if the packet comes from the bus,
    // we charge also headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}
void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
        !pkt->isMaskedWrite()) {
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}
void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}
void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        stats.cmdStats(pkt).mshr_uncacheable[pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing the original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on the behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->matchAddr(pkt));
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time);

        // If an outstanding request is in progress (we found an
        // MSHR) this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}
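// The software-prefetch path above splits one request into two
// lifecycles: the original packet is acknowledged immediately with
// dummy data so the core can retire the instruction, while a clone (if
// no MSHR already covers the line) carries the miss onward and becomes
// the packet the MSHR owns. A condensed sketch of that split (editor's
// illustration, kept out of the build via #if 0; the helper name is
// invented):
#if 0
static PacketPtr
sketchSWPrefetchSplit(PacketPtr pkt, MSHR *mshr)
{
    PacketPtr pf = nullptr;
    if (!mshr) {
        // clone request and packet with an independent lifetime
        auto req = std::make_shared<Request>(pkt->req->getPaddr(),
                                             pkt->req->getSize(),
                                             pkt->req->getFlags(),
                                             pkt->req->masterId());
        pf = new Packet(req, pkt->cmd);
        pf->allocate();
    }
    pkt->makeTimingResponse();  // original acks with dummy data
    // caller schedules pkt back to the CPU and continues with pf
    return pf;
}
#endif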
void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable), is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line, note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache is
        // responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}
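// An illustration of the states involved in the express-snoop sequence
// above (editor's summary, assuming two branches under one crossbar):
//
//   before: an upstream cache U holds the line Owned (dirty, not
//           writable); other caches may hold it Shared
//   1. U sets cacheResponding on the request, so U supplies the data
//   2. this cache issues the express snoop; needing writable makes it
//      equivalent to an invalidation
//   3. every Shared copy in the system is invalidated
//   after:  U's copy is effectively Modified; no Shared copies remain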
PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable,
                        bool is_whole_line_write) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
    if (is_whole_line_write) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable there are two cases
        // where we need to ensure the response will not fetch the
        // block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}
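// The command-selection ladder above, flattened into a standalone
// sketch (editor's illustration, kept out of the build via #if 0; the
// helper name is invented and useUpgrades == true is folded in):
#if 0
static MemCmd::Command
sketchMissCmd(bool whole_line_write, bool blk_valid, bool is_llsc,
              bool sc_fail, bool needs_writable, bool force_clean_rsp)
{
    if (whole_line_write)
        return MemCmd::InvalidateReq;    // own the line, fetch no data
    if (blk_valid)                       // read-only copy already present
        return is_llsc ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    if (sc_fail)
        return MemCmd::SCUpgradeFailReq; // still fetch data for snoopers
    if (needs_writable)
        return MemCmd::ReadExReq;
    return force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq;
}
#endif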
Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
                                         pkt->isWholeLineWrite(blkSize));

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

    CacheBlk::State old_state = blk ? blk->status : 0;

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything. Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->isWholeLineWrite(blkSize)) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                const bool allocate = allocOnFill(pkt->cmd) &&
                    (!writeAllocator || writeAllocator->allocate());
                blk = handleFill(bus_pkt, blk, writebacks, allocate);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}
Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        return memSidePort.sendAtomic(pkt);
    }

    return BaseCache::recvAtomic(pkt);
}
/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////
void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
{
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate() &&
        !mshr->wasWholeLineWrite;

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target : targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                if (tgt_pkt->needsWritable()) {
                    // All other copies of the block were invalidated and we
                    // have an exclusive copy.

                    // The coherence protocol assumes that if we fetched an
                    // exclusive copy of the block, we have the intention to
                    // modify it. Therefore the MSHR for the PrefetchExReq has
                    // been the point of ordering and this cache has committed
                    // to respond to snoops for the block.
                    //
                    // In most cases this is true anyway - a PrefetchExReq
                    // will be followed by a WriteReq. However, if that
                    // doesn't happen, the block is not marked as dirty and
                    // the cache doesn't respond to snoops it has committed
                    // to respond to.
                    //
                    // To avoid deadlocks in cases where there is a snoop
                    // between the PrefetchExReq and the expected WriteReq, we
                    // proactively mark the block as Dirty.
                    assert(blk);
                    blk->status |= BlkDirty;

                    panic_if(isReadOnly, "Prefetch exclusive requests from "
                             "read-only cache %s\n", name());
                }

                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                assert(blk);
                assert(blk->isWritable());
            }

            // Here we decide whether we will satisfy the target using
            // data from the block or from the response. We use the
            // block data to satisfy the request when the block is
            // present and valid and in addition the response is not
            // forwarding data to the cache above (we didn't fill
            // either); otherwise we use the packet data.
            if (blk && blk->isValid() &&
                (!mshr->isForward || !pkt->hasData())) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If not critical word (offset) return payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                stats.cmdStats(tgt_pkt)
                    .missLatency[tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                if (is_invalidate && blk && blk->isValid()) {
                    // We are about to send a response to a cache above
                    // that asked for an invalidation; we need to
                    // invalidate our copy immediately as the most
                    // up-to-date copy of the block will now be in the
                    // cache above. It will also prevent this cache from
                    // responding (if the block was previously dirty) to
                    // snoops as they should snoop the caches above where
                    // they will get the response from.
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->matchAddr(tgt_pkt));
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                } else {
                    // MSHR targets can read data either from the
                    // block or the response pkt. If we can't get data
                    // from the block (i.e., invalid or has old data)
                    // or the response (did not bring in any data)
                    // then make sure that the target didn't expect
                    // any.
                    assert(!tgt_pkt->hasRespData());
                }

                // this response did not allocate here and therefore
                // it was not consumed, make sure that any flags are
                // carried over to cache above
                tgt_pkt->copyResponderFlags(pkt);
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an InvalidateResp
            // and the MSHR was created due to an InvalidateReq then a cache
            // above is waiting to satisfy a WriteLineReq. In this case even a
            // non-invalidating snoop is added as a target here since this is
            // the ordering point. When the InvalidateResp reaches this cache,
            // the snooping target will further snoop the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}
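// A standalone sketch of the critical-word-first accounting used for
// FromCPU targets above (editor's illustration, kept out of the build
// via #if 0; the helper name is invented): only targets whose offset
// differs from that of the original miss pay the payload delay.
#if 0
static Tick
sketchCompletionTime(Tick resp_edge, Tick header_delay, Tick payload_delay,
                     int tgt_offset, int initial_offset, int blk_size)
{
    int transfer_offset = tgt_offset - initial_offset;
    if (transfer_offset < 0)
        transfer_offset += blk_size;    // wrap around within the block
    return header_delay + resp_edge +
        (transfer_offset ? payload_delay : 0);
}
// e.g. for a miss at offset 16 of a 64-byte block, a target at offset
// 16 completes after the response latency alone, while a target at
// offset 0 (transfer_offset 48) also waits for the payload.
#endif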
PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}
PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}
/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////
void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time);
}
uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }

            // Copy over flags from the snoop response to make sure we
            // inform the final destination
            pkt->copyResponderFlags(&snoopPkt);
        } else {
            bool already_responded = pkt->cacheResponding();
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!already_responded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // a responsibility to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write permissions
    // MemCmd::HardPFReq is only observed by upstream caches. After missing
    // above and in its own cache, a new MemCmd::ReadReq is created that
    // downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below and all other copies will be invalidated
            // through express snoops, and if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }
    }

    // When a block is compressed, it must first be decompressed before
    // being read, and this increases the snoop delay.
    if (compressor && pkt->isRead()) {
        snoop_delay += compressor->getDecompressionLatency(blk);
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}
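// A sketch of the flag outcome of a snoop hit above, given the block
// state and snoop type (editor's illustration, kept out of the build
// via #if 0; the struct and helper names are invented):
#if 0
struct SnoopOutcome { bool respond; bool has_sharers; bool drop_writable; };

static SnoopOutcome
sketchSnoopHit(bool dirty, bool is_read, bool invalidate, bool uncacheable,
               bool needs_response)
{
    SnoopOutcome o{false, false, false};
    o.respond = dirty && needs_response;  // only a dirty copy responds
    if (is_read && !invalidate) {
        o.has_sharers = true;             // M->O, E->S (or stay O/S)
        o.drop_writable = !uncacheable;   // uncacheable reads leave state
    }
    return o;
}
#endif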
void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());

    // Inform requests (Prefetch, CleanEvict or Writeback) from below of
    // an MSHR hit via setBlockCached.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s)."
                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}
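// A sketch of how the snoop latency reported to the crossbar
// accumulates (editor's illustration, kept out of the build via #if 0;
// the helper name is invented): the first update covers just our
// lookup, the second folds in the upward snoop, and both only ever
// raise the value already set by neighbouring caches.
#if 0
static uint32_t
sketchSnoopDelay(uint32_t current, uint32_t lookup_ticks,
                 uint32_t upward_delay)
{
    uint32_t tentative = std::max(current, lookup_ticks);
    return std::max(tentative, upward_delay + lookup_ticks);
}
#endif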
Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}
bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;

    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}
bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if the prefetch was squashed by an upper cache
        // (to prevent us from grabbing the line), or if a writeback
        // arrived between the time the prefetch was placed in the
        // MSHRs and when it was selected to be sent.
        //
        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear the blocked flag if this deallocation freed an
                // MSHR when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}
Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}