mem-cache: Implement FPC-D cache compression
src/mem/cache/cache.cc
/*
 * Copyright (c) 2010-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010,2015 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 *          Andreas Sandberg
 *          Nikos Nikoleris
 */

/**
 * @file
 * Cache definitions.
 */

#include "mem/cache/cache.hh"

#include <cassert>

#include "base/compiler.hh"
#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "debug/CacheTags.hh"
#include "debug/CacheVerbose.hh"
#include "enums/Clusivity.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "mem/cache/write_queue_entry.hh"
#include "mem/request.hh"
#include "params/Cache.hh"

Cache::Cache(const CacheParams *p)
    : BaseCache(p, p->system->cacheLineSize()),
      doFastWrites(true)
{
}

void
Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                      bool deferred_response, bool pending_downgrade)
{
    BaseCache::satisfyRequest(pkt, blk);

    if (pkt->isRead()) {
        // determine if this read is from a (coherent) cache or not
        if (pkt->fromCache()) {
            assert(pkt->getSize() == blkSize);
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsWritable()) {
                // sanity check
                assert(pkt->cmd == MemCmd::ReadExReq ||
                       pkt->cmd == MemCmd::SCUpgradeFailReq);
                assert(!pkt->hasSharers());

                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty (in the Modified state); the
                // responsibility for the dirty data moves upstream,
                // so we clear our own dirty bit
                if (blk->isDirty()) {
                    pkt->setCacheResponding();
                    blk->status &= ~BlkDirty;
                }
            } else if (blk->isWritable() && !pending_downgrade &&
                       !pkt->hasSharers() &&
                       pkt->cmd != MemCmd::ReadCleanReq) {
                // we can give the requester a writable copy on a read
                // request if:
                //  - we have a writable copy at this level (& below)
                //  - we don't have a pending snoop from below
                //    signaling another read request
                //  - no other cache above has a copy (otherwise it
                //    would have set the hasSharers flag when
                //    snooping the packet)
                //  - the read has not explicitly asked for a clean
                //    copy of the line
                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // respond with the line in Modified state
                        // (cacheResponding set, hasSharers not set)
                        pkt->setCacheResponding();

                        // if this cache is mostly inclusive, we
                        // keep the block in the Exclusive state,
                        // and pass it upwards as Modified
                        // (writable and dirty), hence we have
                        // multiple caches, all on the same path
                        // towards memory, all considering the
                        // same block writable, but only one
                        // considering it Modified

                        // we get away with multiple caches (on
                        // the same path to memory) considering
                        // the block writable as we always enter
                        // the cache hierarchy through a cache,
                        // and first snoop upwards in all other
                        // branches
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // have to respond with a shared line
                        pkt->setHasSharers();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->setHasSharers();
            }
        }
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

bool
Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
              PacketList &writebacks)
{
    if (pkt->req->isUncacheable()) {
        assert(pkt->isRequest());

        chatty_assert(!(isReadOnly && pkt->isWrite()),
                      "Should never see a write in a read-only cache %s\n",
                      name());

        DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

        // flush and invalidate any existing block
        CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
        if (old_blk && old_blk->isValid()) {
            BaseCache::evictBlock(old_blk, writebacks);
        }

        blk = nullptr;
        // lookupLatency is the latency in case the request is uncacheable.
        lat = lookupLatency;
        return false;
    }

    return BaseCache::access(pkt, blk, lat, writebacks);
}

void
Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // We use forwardLatency here because we are copying writebacks to
        // the write buffer.

        // Call isCachedAbove for Writebacks, CleanEvicts and
        // WriteCleans to discover if the block is cached above.
        if (isCachedAbove(wbPkt)) {
            if (wbPkt->cmd == MemCmd::CleanEvict) {
                // Delete CleanEvict because cached copies exist above. The
                // packet destructor will delete the request object because
                // this is a non-snoop request packet which does not require
                // a response.
                delete wbPkt;
            } else if (wbPkt->cmd == MemCmd::WritebackClean) {
                // clean writeback, do not send since the block is
                // still cached above
                assert(writebackClean);
                delete wbPkt;
            } else {
                assert(wbPkt->cmd == MemCmd::WritebackDirty ||
                       wbPkt->cmd == MemCmd::WriteClean);
                // Set BLOCK_CACHED flag in Writeback and send below, so that
                // the Writeback does not reset the bit corresponding to this
                // address in the snoop filter below.
                wbPkt->setBlockCached();
                allocateWriteBuffer(wbPkt, forward_time);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            allocateWriteBuffer(wbPkt, forward_time);
        }
        writebacks.pop_front();
    }
}

void
Cache::doWritebacksAtomic(PacketList& writebacks)
{
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        // Call isCachedAbove for both Writebacks and CleanEvicts. If
        // isCachedAbove returns true we set the BLOCK_CACHED flag in
        // Writebacks and discard CleanEvicts.
        if (isCachedAbove(wbPkt, false)) {
            if (wbPkt->cmd == MemCmd::WritebackDirty ||
                wbPkt->cmd == MemCmd::WriteClean) {
                // Set BLOCK_CACHED flag in Writeback and send below,
                // so that the Writeback does not reset the bit
                // corresponding to this address in the snoop filter
                // below. We can discard CleanEvicts because cached
                // copies exist above. In atomic mode isCachedAbove
                // modifies the packet to set the BLOCK_CACHED flag.
                memSidePort.sendAtomic(wbPkt);
            }
        } else {
            // If the block is not cached above, send packet below. Both
            // CleanEvict and Writeback with BLOCK_CACHED flag cleared will
            // reset the bit corresponding to this address in the snoop filter
            // below.
            memSidePort.sendAtomic(wbPkt);
        }
        writebacks.pop_front();
        // In case of CleanEvicts, the packet destructor will delete the
        // request object because this is a non-snoop request packet which
        // does not require a response.
        delete wbPkt;
    }
}

void
Cache::recvTimingSnoopResp(PacketPtr pkt)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // determine if the response is from a snoop request we created
    // (in which case it should be in the outstandingSnoop), or if we
    // merely forwarded someone else's snoop request
    const bool forwardAsSnoop = outstandingSnoop.find(pkt->req) ==
        outstandingSnoop.end();

    if (!forwardAsSnoop) {
        // the packet came from this cache, so sink it here and do not
        // forward it
        assert(pkt->cmd == MemCmd::HardPFResp);

        outstandingSnoop.erase(pkt->req);

        DPRINTF(Cache, "Got prefetch response from above for addr "
                "%#llx (%s)\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
        recvTimingResp(pkt);
        return;
    }

    // forwardLatency is set here because there is a response from an
    // upper level cache.
    // To pay for the delay that occurs if the packet comes from the bus,
    // we also charge the headerDelay.
    Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // Reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    memSidePort.schedTimingSnoopResp(pkt, snoop_resp_time);
}

void
Cache::promoteWholeLineWrites(PacketPtr pkt)
{
    // Cache line clearing instructions
    if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
        !pkt->isMaskedWrite()) {
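        // A worked example (assuming a typical 64-byte block size): a
        // 64-byte WriteReq whose address has getOffset(blkSize) == 0
        // covers the entire line, so the line can be installed without
        // first fetching its old contents from below. Promoting the
        // command to WriteLineReq conveys exactly that to the rest of
        // the hierarchy.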
        pkt->cmd = MemCmd::WriteLineReq;
        DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
    }
}

void
Cache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    // should never be satisfying an uncacheable access as we
    // flush and invalidate any existing block as part of the
    // lookup
    assert(!pkt->req->isUncacheable());

    BaseCache::handleTimingReqHit(pkt, blk, request_time);
}

void
Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
                           Tick request_time)
{
    if (pkt->req->isUncacheable()) {
        // ignore any existing MSHR if we are dealing with an
        // uncacheable request

        // should have flushed and have no valid block
        assert(!blk || !blk->isValid());

        stats.cmdStats(pkt).mshr_uncacheable[pkt->req->masterId()]++;

        if (pkt->isWrite()) {
            allocateWriteBuffer(pkt, forward_time);
        } else {
            assert(pkt->isRead());

            // uncacheable accesses always allocate a new MSHR

            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }

        return;
    }

    Addr blk_addr = pkt->getBlockAddr(blkSize);
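    // getBlockAddr simply masks off the intra-block offset bits; e.g.,
    // assuming a 64-byte block size, an access to 0x1234 maps to block
    // address 0x1200 (0x1234 & ~0x3F).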

    MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());

    // Software prefetch handling:
    // To keep the core from waiting on data it won't look at
    // anyway, send back a response with dummy data. Miss handling
    // will continue asynchronously. Unfortunately, the core will
    // insist upon freeing the original Packet/Request, so we have to
    // create a new pair with a different lifecycle. Note that this
    // processing happens before any MSHR munging on the behalf of
    // this request because this new Request will be the one stored
    // into the MSHRs, not the original.
    if (pkt->cmd.isSWPrefetch()) {
        assert(pkt->needsResponse());
        assert(pkt->req->hasPaddr());
        assert(!pkt->req->isUncacheable());

        // There's no reason to add a prefetch as an additional target
        // to an existing MSHR. If an outstanding request is already
        // in progress, there is nothing for the prefetch to do.
        // If this is the case, we don't even create a request at all.
        PacketPtr pf = nullptr;

        if (!mshr) {
            // copy the request and create a new SoftPFReq packet
            RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
                                                       pkt->req->getSize(),
                                                       pkt->req->getFlags(),
                                                       pkt->req->masterId());
            pf = new Packet(req, pkt->cmd);
            pf->allocate();
            assert(pf->matchAddr(pkt));
            assert(pf->getSize() == pkt->getSize());
        }

        pkt->makeTimingResponse();

        // request_time is used here, taking into account lat and the delay
        // charged if the packet comes from the xbar.
        cpuSidePort.schedTimingResp(pkt, request_time);

        // If an outstanding request is in progress (we found an
        // MSHR), this is set to null
        pkt = pf;
    }

    BaseCache::handleTimingReqMiss(pkt, mshr, blk, forward_time, request_time);
}

void
Cache::recvTimingReq(PacketPtr pkt)
{
    DPRINTF(CacheTags, "%s tags:\n%s\n", __func__, tags->print());

    promoteWholeLineWrites(pkt);

    if (pkt->cacheResponding()) {
        // a cache above us (but not where the packet came from) is
        // responding to the request, in other words it has the line
        // in Modified or Owned state
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if the packet needs the block to be writable, and the cache
        // that has promised to respond (setting the cache responding
        // flag) is not providing writable (it is in Owned rather than
        // the Modified state), we know that there may be other Shared
        // copies in the system; go out and invalidate them all
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        // an upstream cache that had the line in Owned state
        // (dirty, but not writable) is responding and thus
        // transferring the dirty line from one branch of the
        // cache hierarchy to another

        // send out an express snoop and invalidate all other
        // copies (snooping a packet that needs writable is the
        // same as an invalidation), thus turning the Owned line
        // into a Modified line; note that we don't invalidate the
        // block in the current cache or any other cache on the
        // path to memory

        // create a downstream express snoop with cleared packet
        // flags, there is no need to allocate any data as the
        // packet is merely used to co-ordinate state transitions
        Packet *snoop_pkt = new Packet(pkt, true, false);

        // also reset the bus time that the original packet has
        // not yet paid for
        snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;

        // make this an instantaneous express snoop, and let the
        // other caches in the system know that another cache
        // is responding, because we have found the authoritative
        // copy (Modified or Owned) that will supply the right
        // data
        snoop_pkt->setExpressSnoop();
        snoop_pkt->setCacheResponding();

        // this express snoop travels towards the memory, and at
        // every crossbar it is snooped upwards thus reaching
        // every cache in the system
        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
        // express snoops always succeed
        assert(success);

        // main memory will delete the snoop packet

        // queue for deletion, as opposed to immediate deletion, as
        // the sending cache is still relying on the packet
        pendingDelete.reset(pkt);

        // no need to take any further action in this particular cache
        // as an upstream cache has already committed to responding,
        // and we have already sent out any express snoops in the
        // section above to ensure all other copies in the system are
        // invalidated
        return;
    }

    BaseCache::recvTimingReq(pkt);
}

PacketPtr
Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
                        bool needsWritable,
                        bool is_whole_line_write) const
{
    // should never see evictions here
    assert(!cpu_pkt->isEviction());

    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable() ||
        (!blkValid && cpu_pkt->isUpgrade()) ||
        cpu_pkt->cmd == MemCmd::InvalidateReq || cpu_pkt->isClean()) {
        // uncacheable requests and upgrades from upper-level caches
        // that missed completely just go through as is
        return nullptr;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
    if (is_whole_line_write) {
        assert(!blkValid || !blk->isWritable());
        // forward as invalidate to all other caches, this gives us
        // the line in Exclusive state, and invalidates all other
        // copies
        cmd = MemCmd::InvalidateReq;
    } else if (blkValid && useUpgrades) {
        // only reason to be here is that blk is read only and we need
        // it to be writable
        assert(needsWritable);
        assert(!blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
               cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
        // Even though this SC will fail, we still need to send out the
        // request and get the data to supply it to other snoopers in the case
        // where the determination that the StoreCond fails is delayed due to
        // all caches not being on the same local bus.
        cmd = MemCmd::SCUpgradeFailReq;
    } else {
        // block is invalid

        // If the request does not need a writable copy there are two
        // cases where we need to ensure the response will not fetch
        // the block in dirty state:
        // * this cache is read only and it does not perform
        //   writebacks,
        // * this cache is mostly exclusive and will not fill (since
        //   it does not fill it will have to writeback the dirty data
        //   immediately, which generates unnecessary writebacks).
        bool force_clean_rsp = isReadOnly || clusivity == Enums::mostly_excl;
        cmd = needsWritable ? MemCmd::ReadExReq :
            (force_clean_rsp ? MemCmd::ReadCleanReq : MemCmd::ReadSharedReq);
    }
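
    // To summarize the selection above (a condensed restatement of the
    // cases, not additional logic):
    //   whole-line write                 -> InvalidateReq
    //   valid but read-only block        -> (SC)UpgradeReq
    //   failed StoreCond                 -> SCUpgradeFailReq
    //   invalid block, needs writable    -> ReadExReq
    //   invalid block, clean rsp forced  -> ReadCleanReq
    //   invalid block, otherwise         -> ReadSharedReq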
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);

    // if there are upstream caches that have already marked the
    // packet as having sharers (not passing writable), pass that info
    // downstream
    if (cpu_pkt->hasSharers() && !needsWritable) {
        // note that cpu_pkt may have spent a considerable time in the
        // MSHR queue and that the information could possibly be out
        // of date, however, there is no harm in conservatively
        // assuming the block has sharers
        pkt->setHasSharers();
        DPRINTF(Cache, "%s: passing hasSharers from %s to %s\n",
                __func__, cpu_pkt->print(), pkt->print());
    }

    // the packet should be block aligned
    assert(pkt->getAddr() == pkt->getBlockAddr(blkSize));

    pkt->allocate();
    DPRINTF(Cache, "%s: created %s from %s\n", __func__, pkt->print(),
            cpu_pkt->print());
    return pkt;
}

Cycles
Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
                           PacketList &writebacks)
{
    // deal with the packets that go through the write path of
    // the cache, i.e. any evictions and writes
    if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
        (pkt->req->isUncacheable() && pkt->isWrite())) {
        Cycles latency = ticksToCycles(memSidePort.sendAtomic(pkt));

        // at this point, if the request was an uncacheable write
        // request, it has been satisfied by a memory below and the
        // packet carries the response back
        assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
               pkt->isResponse());

        return latency;
    }

    // only misses left

    PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable(),
                                         pkt->isWholeLineWrite(blkSize));

    bool is_forward = (bus_pkt == nullptr);

    if (is_forward) {
        // just forwarding the same request to the next level
        // no local cache operation involved
        bus_pkt = pkt;
    }

    DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
            bus_pkt->print());

#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));

    bool is_invalidate = bus_pkt->isInvalidate();

    // We are now dealing with the response handling
    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
            bus_pkt->print(), old_state);

    // If packet was a forward, the response (if any) is already
    // in place in the bus_pkt == pkt structure, so we don't need
    // to do anything. Otherwise, use the separate bus_pkt to
    // generate response to pkt and then delete it.
    if (!is_forward) {
        if (pkt->needsResponse()) {
            assert(bus_pkt->isResponse());
            if (bus_pkt->isError()) {
                pkt->makeAtomicResponse();
                pkt->copyError(bus_pkt);
            } else if (pkt->isWholeLineWrite(blkSize)) {
                // note the use of pkt, not bus_pkt here.

                // write-line request to the cache that promoted
                // the write to a whole line
                const bool allocate = allocOnFill(pkt->cmd) &&
                    (!writeAllocator || writeAllocator->allocate());
                blk = handleFill(bus_pkt, blk, writebacks, allocate);
                assert(blk != nullptr);
                is_invalidate = false;
                satisfyRequest(pkt, blk);
            } else if (bus_pkt->isRead() ||
                       bus_pkt->cmd == MemCmd::UpgradeResp) {
                // we're updating cache state to allow us to
                // satisfy the upstream request from the cache
                blk = handleFill(bus_pkt, blk, writebacks,
                                 allocOnFill(pkt->cmd));
                satisfyRequest(pkt, blk);
                maintainClusivity(pkt->fromCache(), blk);
            } else {
                // we're satisfying the upstream request without
                // modifying cache state, e.g., a write-through
                pkt->makeAtomicResponse();
            }
        }
        delete bus_pkt;
    }

    if (is_invalidate && blk && blk->isValid()) {
        invalidateBlock(blk);
    }

    return latency;
}

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    promoteWholeLineWrites(pkt);

    // follow the same flow as in recvTimingReq, and check if a cache
    // above us is responding
    if (pkt->cacheResponding()) {
        assert(!pkt->req->isCacheInvalidate());
        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
                pkt->print());

        // if a cache is responding, and it had the line in Owned
        // rather than Modified state, we need to invalidate any
        // copies that are not on the same path to memory
        assert(pkt->needsWritable() && !pkt->responderHadWritable());

        return memSidePort.sendAtomic(pkt);
    }

    return BaseCache::recvAtomic(pkt);
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////

void
Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
{
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    // First offset for critical word first calculations
    const int initial_offset = initial_tgt->pkt->getOffset(blkSize);

    const bool is_error = pkt->isError();
    // allow invalidation responses originating from write-line
    // requests to be discarded
    bool is_invalidate = pkt->isInvalidate() &&
        !mshr->wasWholeLineWrite;

    MSHR::TargetList targets = mshr->extractServiceableTargets(pkt);
    for (auto &target: targets) {
        Packet *tgt_pkt = target.pkt;
        switch (target.source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            // Here we charge on completion_time the delay of the xbar if the
            // packet comes from it, charged on headerDelay.
            completion_time = pkt->headerDelay;

            // Software prefetch handling for cache closest to core
            if (tgt_pkt->cmd.isSWPrefetch()) {
                if (tgt_pkt->needsWritable()) {
                    // All other copies of the block were invalidated and we
                    // have an exclusive copy.

                    // The coherence protocol assumes that if we fetched an
                    // exclusive copy of the block, we have the intention to
                    // modify it. Therefore the MSHR for the PrefetchExReq has
                    // been the point of ordering and this cache has committed
                    // to respond to snoops for the block.
                    //
                    // In most cases this is true anyway - a PrefetchExReq
                    // will be followed by a WriteReq. However, if that
                    // doesn't happen, the block is not marked as dirty and
                    // the cache doesn't respond to snoops even though it has
                    // committed to do so.
                    //
                    // To avoid deadlocks in cases where there is a snoop
                    // between the PrefetchExReq and the expected WriteReq, we
                    // proactively mark the block as Dirty.
                    assert(blk);
                    blk->status |= BlkDirty;

                    panic_if(isReadOnly, "Prefetch exclusive requests from "
                             "read-only cache %s\n", name());
                }

                // a software prefetch would have already been ack'd
                // immediately with dummy data so the core would be able to
                // retire it. This request completes right here, so we
                // deallocate it.
                delete tgt_pkt;
                break; // skip response
            }

            // unlike the other packet flows, where data is found in other
            // caches or memory and brought back, write-line requests always
            // have the data right away, so the above check for "is fill?"
            // cannot actually be determined until examining the stored MSHR
            // state. We "catch up" with that logic here, which is duplicated
            // from above.
            if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                assert(!is_error);
                assert(blk);
                assert(blk->isWritable());
            }

            if (blk && blk->isValid() && !mshr->isForward) {
                satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());

                // How many bytes past the first request is this one
                int transfer_offset =
                    tgt_pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }
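
                // For example (assuming a 64-byte block), if the initial
                // target asked for offset 16 and this target asks for
                // offset 8, the raw difference is -8 and the wrap-around
                // above yields 56: this target's data arrives 56 bytes
                // after the critical word in the transfer order.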

                // If not the critical word, also pay the payloadDelay.
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    (transfer_offset ? pkt->payloadDelay : 0);

                assert(!tgt_pkt->req->isUncacheable());

                assert(tgt_pkt->req->masterId() < system->maxMasters());
                stats.cmdStats(tgt_pkt)
                    .missLatency[tgt_pkt->req->masterId()] +=
                    completion_time - target.recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(tgt_pkt->cmd == MemCmd::StoreCondReq ||
                       tgt_pkt->cmd == MemCmd::StoreCondFailReq ||
                       tgt_pkt->cmd == MemCmd::SCUpgradeFailReq);
                // responseLatency is the latency of the return path
                // from lower level caches/memory to an upper level cache or
                // the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                tgt_pkt->req->setExtraData(0);
            } else {
                // We are about to send a response to a cache above
                // that asked for an invalidation; we need to
                // invalidate our copy immediately as the most
                // up-to-date copy of the block will now be in the
                // cache above. It will also prevent this cache from
                // responding (if the block was previously dirty) to
                // snoops as they should snoop the caches above where
                // they will get the response from.
                if (is_invalidate && blk && blk->isValid()) {
                    invalidateBlock(blk);
                }
                // not a cache fill, just forwarding response
                // responseLatency is the latency of the return path
                // from lower level caches/memory to the core.
                completion_time += clockEdge(responseLatency) +
                    pkt->payloadDelay;
                if (pkt->isRead() && !is_error) {
                    // sanity check
                    assert(pkt->matchAddr(tgt_pkt));
                    assert(pkt->getSize() >= tgt_pkt->getSize());

                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
                }

                // this response did not allocate here and therefore
                // it was not consumed, make sure that any flags are
                // carried over to the cache above
                tgt_pkt->copyResponderFlags(pkt);
            }
            tgt_pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                tgt_pkt->copyError(pkt);
            if (tgt_pkt->cmd == MemCmd::ReadResp &&
                (is_invalidate || mshr->hasPostInvalidate())) {
                // If an intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                tgt_pkt->cmd = MemCmd::ReadRespWithInvalidate;
                DPRINTF(Cache, "%s: updated cmd to %s\n", __func__,
                        tgt_pkt->print());
            }
            // Reset the bus additional time as it is now accounted for
            tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(tgt_pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete tgt_pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            // If the response is invalidating, a snooping target can
            // be satisfied if it is also invalidating. If the response is
            // not only invalidating, but more specifically an InvalidateResp,
            // and the MSHR was created due to an InvalidateReq, then a cache
            // above is waiting to satisfy a WriteLineReq. In this case even a
            // non-invalidating snoop is added as a target here since this is
            // the ordering point. When the InvalidateResp reaches this cache,
            // the snooping target will further snoop the cache above with the
            // WriteLineReq.
            assert(!is_invalidate || pkt->cmd == MemCmd::InvalidateResp ||
                   pkt->req->isCacheMaintenance() ||
                   mshr->hasPostInvalidate());
            handleSnoop(tgt_pkt, blk, true, true, mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target.source);
        }
    }

    maintainClusivity(targets.hasFromCache, blk);

    if (blk && blk->isValid()) {
        // an invalidate response stemming from a write line request
        // should not invalidate the block, so check if the
        // invalidation should be discarded
        if (is_invalidate || mshr->hasPostInvalidate()) {
            invalidateBlock(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }
}

PacketPtr
Cache::evictBlock(CacheBlk *blk)
{
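    // A dirty block must always be written back. A clean block only
    // produces a writeback if writebackClean is set (the cache forwards
    // clean victims downstream, e.g., to support exclusive caches
    // below); otherwise a zero-sized CleanEvict merely notifies the
    // snoop filter that this copy is gone.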
    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
        writebackBlk(blk) : cleanEvictBlk(blk);

    invalidateBlock(blk);

    return pkt;
}

PacketPtr
Cache::cleanEvictBlk(CacheBlk *blk)
{
    assert(!writebackClean);
    assert(blk && blk->isValid() && !blk->isDirty());

    // Creating a zero sized write, a message to the snoop filter
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
    pkt->allocate();
    DPRINTF(Cache, "Create CleanEvict %s\n", pkt->print());

    return pkt;
}

/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

void
Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
                              bool already_copied, bool pending_inval)
{
    // sanity check
    assert(req_pkt->isRequest());
    assert(req_pkt->needsResponse());

    DPRINTF(Cache, "%s: for %s\n", __func__, req_pkt->print());
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = req_pkt;
    if (!already_copied)
        // do not clear flags, and allocate space for data if the
        // packet needs it (the only packets that carry data are read
        // responses)
        pkt = new Packet(req_pkt, false, req_pkt->isRead());

    assert(req_pkt->req->isUncacheable() || req_pkt->isInvalidate() ||
           pkt->hasSharers());
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert cacheResponding in both cases, but
        // in the latter case cacheResponding will keep the
        // invalidation from reaching cache A. This special response
        // tells cache A that it gets the block to satisfy its read,
        // but must immediately invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    // Here we consider forward_time, paying for just forward latency and
    // also charging the delay provided by the xbar.
    // forward_time is used as send_time in the next allocateWriteBuffer().
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
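    // As a reading aid: clockEdge(forwardLatency) is the tick of the
    // clock edge forwardLatency cycles from now, and adding headerDelay
    // pays the crossbar time the packet has accrued but not yet paid.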
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;
    DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
            pkt->print(), forward_time);
    memSidePort.schedTimingSnoopResp(pkt, forward_time);
}

uint32_t
Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
                   bool is_deferred, bool pending_inval)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_writable = pkt->needsWritable();

    // at the moment we could get an uncacheable write which does not
    // have the invalidate flag, and we need a suitable way of dealing
    // with this case
    panic_if(invalidate && pkt->req->isUncacheable(),
             "%s got an invalidating uncacheable snoop request %s",
             name(), pkt->print());

    uint32_t snoop_delay = 0;

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        if (is_timing) {
            // copy the packet so that we can clear any flags before
            // forwarding it upwards, we also allocate data (passing
            // the pointer along in case of static data), in case
            // there is a snoop hit in upper levels
            Packet snoopPkt(pkt, true, true);
            snoopPkt.setExpressSnoop();
            // the snoop packet does not need to wait any additional
            // time
            snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
            cpuSidePort.sendTimingSnoopReq(&snoopPkt);

            // add the header delay (including crossbar and snoop
            // delays) of the upward snoop to the snoop delay for this
            // cache
            snoop_delay += snoopPkt.headerDelay;

            // If this request is a prefetch or clean evict and an upper level
            // signals block present, make sure to propagate the block
            // presence to the requester.
            if (snoopPkt.isBlockCached()) {
                pkt->setBlockCached();
            }
            // If the request was satisfied by snooping the cache
            // above, mark the original packet as satisfied too.
            if (snoopPkt.satisfied()) {
                pkt->setSatisfied();
            }

            // Copy over flags from the snoop response to make sure we
            // inform the final destination
            pkt->copyResponderFlags(&snoopPkt);
        } else {
            bool already_responded = pkt->cacheResponding();
            cpuSidePort.sendAtomicSnoop(pkt);
            if (!already_responded && pkt->cacheResponding()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
        }
    }

    bool respond = false;
    bool blk_valid = blk && blk->isValid();
    if (pkt->isClean()) {
        if (blk_valid && blk->isDirty()) {
            DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);

            if (is_timing) {
                // anything that is merely forwarded pays for the forward
                // latency and the delay provided by the crossbar
                Tick forward_time = clockEdge(forwardLatency) +
                    pkt->headerDelay;
                doWritebacks(writebacks, forward_time);
            } else {
                doWritebacksAtomic(writebacks);
            }
            pkt->setSatisfied();
        }
    } else if (!blk_valid) {
        DPRINTF(CacheVerbose, "%s: snoop miss for %s\n", __func__,
                pkt->print());
        if (is_deferred) {
            // we no longer have the block, and will not respond, but a
            // packet was allocated in MSHR::handleSnoop and we have
            // to delete it
            assert(pkt->needsResponse());

            // we have passed the block to a cache upstream, that
            // cache should be responding
            assert(pkt->cacheResponding());

            delete pkt;
        }
        return snoop_delay;
    } else {
        DPRINTF(Cache, "%s: snoop hit for %s, old state is %s\n", __func__,
                pkt->print(), blk->print());

        // We may end up modifying both the block state and the packet (if
        // we respond in atomic mode), so just figure out what to do now
        // and then do it later. We respond to all snoops that need
        // responses provided we have the block in dirty state. The
        // invalidation itself is taken care of below. We don't respond to
        // cache maintenance operations as this is done by the destination
        // xbar.
        respond = blk->isDirty() && pkt->needsResponse();

        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
                      "a dirty block in a read-only cache %s\n", name());
    }

    // Invalidate any prefetches from below that would strip write
    // permissions. MemCmd::HardPFReq is only observed by upstream caches.
    // After missing above and in its own cache, a new MemCmd::ReadReq is
    // created that downstream caches observe.
    if (pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Found addr %#llx in upper level cache for snoop %s "
                "from lower cache\n", pkt->getAddr(), pkt->print());
        pkt->setBlockCached();
        return snoop_delay;
    }

    if (pkt->isRead() && !invalidate) {
        // reading without requiring the line in a writable state
        assert(!needs_writable);
        pkt->setHasSharers();

        // if the requesting packet is uncacheable, retain the line in
        // the current state, otherwise unset the writable flag,
        // which means we go from Modified to Owned (and will respond
        // below), remain in Owned (and will respond below), from
        // Exclusive to Shared, or remain in Shared
        if (!pkt->req->isUncacheable())
            blk->status &= ~BlkWritable;
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    if (respond) {
        // prevent anyone else from responding, cache as well as
        // memory, and also prevent any memory from even seeing the
        // request
        pkt->setCacheResponding();
        if (!pkt->isClean() && blk->isWritable()) {
            // inform the cache hierarchy that this cache had the line
            // in the Modified state so that we avoid unnecessary
            // invalidations (see Packet::setResponderHadWritable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no point
            // in setting the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else {
            // if the packet has needsWritable set we invalidate our
            // copy below, and all other copies will be invalidated
            // through express snoops; if needsWritable is not set
            // we already called setHasSharers above
        }

        // if we are returning a writable and dirty (Modified) line,
        // we should be invalidating the line
        panic_if(!invalidate && !pkt->hasSharers(),
                 "%s is passing a Modified line through %s, "
                 "but keeping the block", name(), pkt->print());

        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            // packets such as upgrades do not actually have any data
            // payload
            if (pkt->hasData())
                pkt->setDataFromBlock(blk->data, blkSize);
        }

        // When a block is compressed, it must first be decompressed before
        // being read, and this increases the snoop delay.
        if (compressor && pkt->isRead()) {
            snoop_delay += compressor->getDecompressionLatency(blk);
        }
    }

    if (!respond && is_deferred) {
        assert(pkt->needsResponse());
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (blk_valid && invalidate) {
        invalidateBlock(blk);
        DPRINTF(Cache, "new state is %s\n", blk->print());
    }

    return snoop_delay;
}

void
Cache::recvTimingSnoopReq(PacketPtr pkt)
{
    DPRINTF(CacheVerbose, "%s: for %s\n", __func__, pkt->print());

    // no need to snoop requests that are not in range
    if (!inRange(pkt->getAddr())) {
        return;
    }

    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);

    Addr blk_addr = pkt->getBlockAddr(blkSize);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    // Update the latency cost of the snoop so that the crossbar can
    // account for it. Do not overwrite what other neighbouring caches
    // have already done, rather take the maximum. The update is
    // tentative, for cases where we return before an upward snoop
    // happens below.
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay,
                                         lookupLatency * clockPeriod());
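
    // Note the unit conversion here: lookupLatency is in cycles, and
    // multiplying by clockPeriod() turns it into ticks. For example,
    // assuming a lookupLatency of 4 cycles and a 500-tick clock period,
    // the tentative snoop delay is 2000 ticks.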

    // Inform requests from below (Prefetch, CleanEvict or Writeback)
    // of an MSHR hit by calling setBlockCached.
    if (mshr && pkt->mustCheckAbove()) {
        DPRINTF(Cache, "Setting block cached for %s from lower cache on "
                "mshr hit\n", pkt->print());
        pkt->setBlockCached();
        return;
    }

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %#llx (%s). "
                "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
                mshr->print());

        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    WriteQueueEntry *wb_entry = writeBuffer.findMatch(blk_addr, is_secure);
    if (wb_entry) {
        DPRINTF(Cache, "Snoop hit in writeback to addr %#llx (%s)\n",
                pkt->getAddr(), is_secure ? "s" : "ns");
        // Expect to see only Writebacks and/or CleanEvicts here, both of
        // which should not be generated for uncacheable data.
        assert(!wb_entry->isUncacheable());
        // There should only be a single request responsible for generating
        // Writebacks/CleanEvicts.
        assert(wb_entry->getNumTargets() == 1);
        PacketPtr wb_pkt = wb_entry->getTarget()->pkt;
        assert(wb_pkt->isEviction() || wb_pkt->cmd == MemCmd::WriteClean);

        if (pkt->isEviction()) {
            // if the block is found in the write queue, set the BLOCK_CACHED
            // flag for Writeback/CleanEvict snoop. On return the snoop will
            // propagate the BLOCK_CACHED flag in Writeback packets and prevent
            // any CleanEvicts from travelling down the memory hierarchy.
            pkt->setBlockCached();
            DPRINTF(Cache, "%s: Squashing %s from lower cache on writequeue "
                    "hit\n", __func__, pkt->print());
            return;
        }

        // conceptually writebacks are no different to other blocks in
        // this cache, so the behaviour is modelled after handleSnoop,
        // the difference being that instead of querying the block
        // state to determine if it is dirty and writable, we use the
        // command and fields of the writeback packet
        bool respond = wb_pkt->cmd == MemCmd::WritebackDirty &&
            pkt->needsResponse();
        bool have_writable = !wb_pkt->hasSharers();
        bool invalidate = pkt->isInvalidate();

        if (!pkt->req->isUncacheable() && pkt->isRead() && !invalidate) {
            assert(!pkt->needsWritable());
            pkt->setHasSharers();
            wb_pkt->setHasSharers();
        }

        if (respond) {
            pkt->setCacheResponding();

            if (have_writable) {
                pkt->setResponderHadWritable();
            }

            doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
                                   false, false);
        }

        if (invalidate && wb_pkt->cmd != MemCmd::WriteClean) {
            // Invalidation trumps our writeback... discard here
            // Note: markInService will remove entry from writeback buffer.
            markInService(wb_entry);
            delete wb_pkt;
        }
    }

    // If this was a shared writeback, there may still be
    // other shared copies above that require invalidation.
    // We could be more selective and return here if the
    // request is non-exclusive or if the writeback is
    // exclusive.
    uint32_t snoop_delay = handleSnoop(pkt, blk, true, false, false);

    // Override what we did when we first saw the snoop, as we now
    // also have the cost of the upwards snoops to account for
    pkt->snoopDelay = std::max<uint32_t>(pkt->snoopDelay, snoop_delay +
                                         lookupLatency * clockPeriod());
}

Tick
Cache::recvAtomicSnoop(PacketPtr pkt)
{
    // no need to snoop requests that are not in range.
    if (!inRange(pkt->getAddr())) {
        return 0;
    }

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
    uint32_t snoop_delay = handleSnoop(pkt, blk, false, false, false);
    return snoop_delay + lookupLatency * clockPeriod();
}

bool
Cache::isCachedAbove(PacketPtr pkt, bool is_timing)
{
    if (!forwardSnoops)
        return false;
    // Mirroring the flow of HardPFReqs, the cache sends CleanEvict and
    // Writeback snoops into upper level caches to check for copies of the
    // same block. Using the BLOCK_CACHED flag with the Writeback/CleanEvict
    // packet, the cache can inform the crossbar below of presence or absence
    // of the block.
    if (is_timing) {
        Packet snoop_pkt(pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // Assert that packet is either Writeback or CleanEvict and not a
        // prefetch request because prefetch requests need an MSHR and may
        // generate a snoop response.
        assert(pkt->isEviction() || pkt->cmd == MemCmd::WriteClean);
        snoop_pkt.senderState = nullptr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);
        // Writeback/CleanEvict snoops do not generate a snoop response.
        assert(!(snoop_pkt.cacheResponding()));
        return snoop_pkt.isBlockCached();
    } else {
        cpuSidePort.sendAtomicSnoop(pkt);
        return pkt->isBlockCached();
    }
}

bool
Cache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    if (tgt_pkt->cmd == MemCmd::HardPFReq && forwardSnoops) {
        DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

        // we should never have hardware prefetches to allocated
        // blocks
        assert(!tags->findBlock(mshr->blkAddr, mshr->isSecure));

        // We need to check the caches above us to verify that
        // they don't have a copy of this block in the dirty state
        // at the moment. Without this check we could get a stale
        // copy from memory that might get used in place of the
        // dirty one.
        Packet snoop_pkt(tgt_pkt, true, false);
        snoop_pkt.setExpressSnoop();
        // We are sending this packet upwards, but if it hits we will
        // get a snoop response that we end up treating just like a
        // normal response, hence it needs the MSHR as its sender
        // state
        snoop_pkt.senderState = mshr;
        cpuSidePort.sendTimingSnoopReq(&snoop_pkt);

        // Check to see if a writeback arrived between the time the
        // prefetch was placed in the MSHRs and when it was selected
        // to be sent, or if the prefetch was squashed by an upper
        // cache (to prevent us from grabbing the line).

        // It is important to check cacheResponding before
        // prefetchSquashed. If another cache has committed to
        // responding, it will be sending a dirty response which will
        // arrive at the MSHR allocated for this request. Checking the
        // prefetchSquash first may result in the MSHR being
        // prematurely deallocated.
        if (snoop_pkt.cacheResponding()) {
            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
            assert(r.second);

            // if we are getting a snoop response with no sharers it
            // will be allocated as Modified
            bool pending_modified_resp = !snoop_pkt.hasSharers();
            markInService(mshr, pending_modified_resp);

            DPRINTF(Cache, "Upward snoop of prefetch for addr"
                    " %#x (%s) hit\n",
                    tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
            return false;
        }

        if (snoop_pkt.isBlockCached()) {
            DPRINTF(Cache, "Block present, prefetch squashed by cache. "
                    "Deallocating mshr target %#x.\n",
                    mshr->blkAddr);

            // Deallocate the mshr target
            if (mshrQueue.forceDeallocateTarget(mshr)) {
                // Clear the blocked flag if this deallocation freed an
                // MSHR when all had previously been utilized
                clearBlocked(Blocked_NoMSHRs);
            }

            // given that no response is expected, delete Request and Packet
            delete tgt_pkt;

            return false;
        }
    }

    return BaseCache::sendMSHRQueuePacket(mshr);
}

Cache*
CacheParams::create()
{
    assert(tags);
    assert(replacement_policy);

    return new Cache(this);
}