mem-cache: Make findVictim non-const
[gem5.git] / src/mem/cache/base.cc
/*
 * Copyright (c) 2012-2013, 2018-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Nikos Nikoleris
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CacheComp.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "mem/cache/tags/super_blk.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"

using namespace std;

BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}

BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : ClockedObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      compressor(p->compressor),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(true),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      blocked(0),
      order(0),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system),
      stats(*this)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    tags->tagsInit();
    if (prefetcher)
        prefetcher->setCache(this);
}
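
// Note on the latency parameters above: a single tag/data latency pair in
// the config drives all four internal latencies, as tag_latency feeds both
// lookupLatency and forwardLatency, while data_latency feeds both
// dataLatency and fillLatency.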

BaseCache::~BaseCache()
{
    delete tempBlock;
}

void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}

void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}

void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetryReq();
}

Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}

void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}

Port &
BaseCache::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return ClockedObject::getPort(if_name, idx);
    }
}

bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}

void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time, which takes
        // into account the delay of the xbar, if any, and just lat,
        // neglecting responseLatency: the hit latency is modelled
        // simply as the value of lat set by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}

void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextQueueEntry()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                stats.cmdStats(pkt).mshr_hits[pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}

void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // After the evicted blocks are selected, they must be forwarded
        // to the write buffer to ensure they logically precede anything
        // happening below
        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as the later handleTimingReqHit
        // might turn the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}
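
// Illustrative example of the timing above (numbers are assumptions, not
// from any config): with forwardLatency = 2 cycles, an incoming
// headerDelay of 1000 ticks, and access() returning lat = 4 cycles (which
// already folds the header delay in via calculateAccessLatency()), a miss
// is forwarded at clockEdge(Cycles(2)) + 1000 ticks, while a hit responds
// at clockEdge(Cycles(4)).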

void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}

void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    const QueueEntry::Target *initial_tgt = mshr->getTarget();
    const Tick miss_latency = curTick() - initial_tgt->recvTime;
    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        stats.cmdStats(initial_tgt->pkt)
            .mshr_uncacheable_lat[pkt->req->masterId()] += miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        stats.cmdStats(initial_tgt->pkt)
            .mshr_miss_latency[pkt->req->masterId()] += miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}


Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // to access.
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. If a
        // dirty block is encountered, a WriteClean will update any
        // copies on the path to memory up to the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter) first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}
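
// Illustrative example (assuming a 1 GHz cache clock, i.e. a 1000-tick
// clockPeriod()): with lat == 3 cycles, recvAtomic() above reports
// 3 * 1000 = 3000 ticks of latency to the atomic requester.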

void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt) ||
        writeBuffer.trySatisfyFunctional(pkt) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}


void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
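
// Illustrative example of the conditional-swap path above (values are
// hypothetical): for an 8-byte CondSwap whose extra data is 0x10 and whose
// packet data is 0x20, the block bytes are overwritten with 0x20 only if
// they currently equal 0x10; either way the packet carries back the old
// memory value, giving compare-and-swap semantics.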

QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}
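
// Summary of the arbitration above: (1) a ready write wins when the write
// buffer is full or no miss is ready, (2) otherwise a ready miss wins,
// (3) with neither, a prefetch may be issued; in the first two cases a
// conflicting older entry in the other queue is serviced first so that
// accesses to the same block stay ordered.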

bool
BaseCache::handleEvictions(std::vector<CacheBlk*> &evict_blks,
                           PacketList &writebacks)
{
    bool replacement = false;
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            replacement = true;

            const MSHR* mshr =
                mshrQueue.findMatch(regenerateBlkAddr(blk), blk->isSecure());
            if (mshr) {
                // Must be an outstanding upgrade or clean request on a block
                // we're about to replace
                assert((!blk->isWritable() && mshr->needsWritable()) ||
                       mshr->isCleaning());
                return false;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (replacement) {
        stats.replacements++;

        // Evict valid blocks associated to this victim block
        for (auto& blk : evict_blks) {
            if (blk->isValid()) {
                evictBlock(blk, writebacks);
            }
        }
    }

    return true;
}

bool
BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
                                 PacketList &writebacks)
{
    // tempBlock does not exist in the tags, so don't do anything for it.
    if (blk == tempBlock) {
        return true;
    }

    // Get superblock of the given block
    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
    const SuperBlk* superblock = static_cast<const SuperBlk*>(
        compression_blk->getSectorBlock());

    // The compressor is called to compress the updated data, so that its
    // metadata can be updated.
    std::size_t compression_size = 0;
    Cycles compression_lat = Cycles(0);
    Cycles decompression_lat = Cycles(0);
    compressor->compress(data, compression_lat, decompression_lat,
                         compression_size);

    // If block's compression factor increased, it may not be co-allocatable
    // anymore. If so, some blocks might need to be evicted to make room for
    // the bigger block

    // Get previous compressed size
    const std::size_t M5_VAR_USED prev_size = compression_blk->getSizeBits();

    // Check if new data is co-allocatable
    const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
        superblock->canCoAllocate(compression_size);

    // If block was compressed, possibly co-allocated with other blocks, and
    // cannot be co-allocated anymore, one or more blocks must be evicted to
    // make room for the expanded block. As of now we decide to evict the co-
    // allocated blocks to make room for the expansion, but other approaches
    // that take the replacement data of the superblock into account may
    // generate better results
    const bool was_compressed = compression_blk->isCompressed();
    if (was_compressed && !is_co_allocatable) {
        std::vector<CacheBlk*> evict_blks;
        for (const auto& sub_blk : superblock->blks) {
            if (sub_blk->isValid() && (compression_blk != sub_blk)) {
                evict_blks.push_back(sub_blk);
            }
        }

        // Try to evict blocks; if it fails, give up on update
        if (!handleEvictions(evict_blks, writebacks)) {
            return false;
        }

        // Update the number of data expansions
        stats.dataExpansions++;

        DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
                "\n", blk->print(), prev_size, compression_size);
    }

    // We always store compressed blocks when possible
    if (is_co_allocatable) {
        compression_blk->setCompressed();
    } else {
        compression_blk->setUncompressed();
    }
    compression_blk->setSizeBits(compression_size);
    compression_blk->setDecompressionLatency(decompression_lat);

    return true;
}
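
// Illustrative example (sizes are hypothetical): if two blocks compressed
// to 256 bits each co-allocate in a 512-bit entry and one is overwritten
// with data that only compresses to 400 bits, canCoAllocate() fails, the
// sibling block is evicted, and the write is counted as a data expansion.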

void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
Cycles
BaseCache::calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const
{
    // A tag-only access has to wait for the packet to arrive in order to
    // perform the tag lookup.
    return ticksToCycles(delay) + lookup_lat;
}

Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const
{
    Cycles lat(0);

    if (blk != nullptr) {
        // As soon as the access arrives, for sequential accesses first access
        // tags, then the data entry. In the case of parallel accesses the
        // latency is dictated by the slowest of tag and data latencies.
        if (sequentialAccess) {
            lat = ticksToCycles(delay) + lookup_lat + dataLatency;
        } else {
            lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
        }

        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
        const Tick tick = curTick() + delay;
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > tick &&
            ticksToCycles(when_ready - tick) > lat) {
            lat += ticksToCycles(when_ready - tick);
        }
    } else {
        // In case of a miss, we neglect the data access in a parallel
        // configuration (i.e., the data access will be stopped as soon as
        // we find out it is a miss), and use the tag-only latency.
        lat = calculateTagOnlyLatency(delay, lookup_lat);
    }

    return lat;
}
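
// Worked example (latencies are assumptions): with lookup_lat = 2 cycles,
// dataLatency = 3 cycles and a 1-cycle arrival delay, a hit costs
// 1 + 2 + 3 = 6 cycles with sequentialAccess, but 1 + max(2, 3) = 4 cycles
// with parallel tag/data access; a miss pays only the tag-only
// 1 + 2 = 3 cycles in either configuration.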

bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.

        // Calculate access latency on top of when the packet arrives. This
        // takes into account the bus delay.
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        return false;
    }

    if (pkt->isEviction()) {
        // We check for the presence of the block in the caches above before
        // issuing a Writeback or CleanEvict to the write buffer. Therefore
        // the only possible case is a CleanEvict packet coming from above
        // and encountering a Writeback generated in a peer cache of this
        // cache, waiting in the write buffer. Cases of upper-level peer
        // caches generating CleanEvict and Writeback, or simply CleanEvict
        // and CleanEvict, almost simultaneously will be caught by snoops
        // sent out by the crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoop into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();

                // A clean evict does not need to access the data array
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // The critical latency part of a write depends only on the tag access
    if (pkt->isWrite()) {
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
    }

    // Writeback handling is a special case. We can write the block into
    // the cache without having a writable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());

            // A writeback searches for the block, then writes the data.
            // As the writeback is being dropped, the data is not touched,
            // and we just had to wait for the time to find a match in the
            // MSHR. As of now, assume an MSHR queue search takes as long
            // as a tag lookup, for simplicity.
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }

            blk->status |= BlkReadable;
        } else if (compressor) {
            // This is an overwrite to an existing block, therefore we need
            // to check for data expansion (i.e., block was compressed with
            // a smaller size, and now it doesn't fit the entry anymore).
            // If that is the case we might need to evict blocks.
            if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
                                       writebacks)) {
                invalidateBlock(blk);
                return false;
            }
        }

        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        // A CleanEvict does not need to access the data array
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }

                blk->status |= BlkReadable;
            }
        } else if (compressor) {
            // This is an overwrite to an existing block, therefore we need
            // to check for data expansion (i.e., block was compressed with
            // a smaller size, and now it doesn't fit the entry anymore).
            // If that is the case we might need to evict blocks.
            if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
                                       writebacks)) {
                invalidateBlock(blk);
                return false;
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        // If this is a write-through packet it will be sent to the cache below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);

        // Calculate access latency based on the need to access the data array
        if (pkt->isRead()) {
            lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

            // When a block is compressed, it must first be decompressed
            // before being read. This adds to the access latency.
            if (compressor) {
                lat += compressor->getDecompressionLatency(blk);
            }
        } else {
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
        }

        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}
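
// Summary of access() above: it returns true when the request is fully
// handled at this level (a hit, an absorbed writeback or WriteClean, a
// squashed CleanEvict, or a complete-miss store conditional that fails
// early) and false when the request must be forwarded below (a miss, a
// cache maintenance operation, or a write-through WriteClean); in all
// cases lat is left holding the latency charged at this level.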

void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}

CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }
    } else {
        // existing block... probably an upgrade
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    // Block is guaranteed to be valid at this point
    assert(blk->isValid());
    assert(blk->isSecure() == is_secure);
    assert(regenerateBlkAddr(blk) == addr);

    blk->status |= BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owner's copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());

        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // The block will be ready when the payload arrives and the fill is done
    blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                      pkt->payloadDelay);

    return blk;
}

CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Block size and compression related access latency. Only relevant if
    // using a compressor, otherwise there is no extra delay, and the block
    // is fully sized
    std::size_t blk_size_bits = blkSize*8;
    Cycles compression_lat = Cycles(0);
    Cycles decompression_lat = Cycles(0);

    // If a compressor is being used, it is called to compress data before
    // insertion. Although in gem5 the data is stored uncompressed, even if a
    // compressor is used, the compression/decompression methods are called to
    // calculate the amount of extra cycles needed to read or write compressed
    // blocks.
    if (compressor && pkt->hasData()) {
        compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat,
                             decompression_lat, blk_size_bits);
    }

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits,
                                        evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Try to evict blocks; if it fails, give up on allocation
    if (!handleEvictions(evict_blks, writebacks)) {
        return nullptr;
    }

    // If using a compressor, set compression data. This must be done before
    // block insertion, as compressed tags use this information.
    if (compressor) {
        compressor->setSizeBits(victim, blk_size_bits);
        compressor->setDecompressionLatency(victim, decompression_lat);
    }

    // Insert new block at victimized entry
    tags->insertBlock(pkt, victim);

    return victim;
}
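
// Note: in line with the commit this change belongs to ("mem-cache: Make
// findVictim non-const"), tags->findVictim() is a non-const call here, so
// tag implementations are free to update internal state (e.g. replacement
// metadata) while selecting the victim and populating evict_blks.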

void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    // If block is still marked as prefetched, then it hasn't been used
    if (blk->wasPrefetched()) {
        stats.unusedPrefetches++;
    }

    // If handling a block present in the Tags, let it do its invalidation
    // process, which will update stats and invalidate the block itself
    if (blk != tempBlock) {
        tags->invalidate(blk);
    } else {
        tempBlock->invalidate();
    }
}

void
BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}

PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    stats.writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    // When a block is compressed, it must first be decompressed before being
    // sent for writeback.
    if (compressor) {
        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
    }

    return pkt;
}
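
// State handed over by writebackBlk() above: a dirty writable block leaves
// as WritebackDirty without sharers (passed as Modified); a dirty
// non-writable block leaves as WritebackDirty with hasSharers set (Owned);
// a clean block can only leave as WritebackClean, which requires
// writebackClean to be enabled.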

PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    // When a block is compressed, it must first be decompressed before being
    // sent for writeback.
    if (compressor) {
        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
    }

    return pkt;
}


void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}

void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}

void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}

Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

1647 // Don't signal prefetch ready time if no MSHRs available
1648 // Will signal once enoguh MSHRs are deallocated
1649 if (prefetcher && mshrQueue.canPrefetch()) {
1650 nextReady = std::min(nextReady,
1651 prefetcher->nextPrefetchReadyTime());
1652 }
1653
1654 return nextReady;
1655 }
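
// Informational note (hedged, based on the queue implementations used
// here): nextReadyTime() returns MaxTick when a queue has nothing ready
// to send, so a MaxTick result from this method means there is nothing
// to schedule a send event for.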

bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    // if the cache is in write coalescing mode or (additionally) in
    // no allocation mode, and we have a write packet with an MSHR
    // that is not a whole-line write (due to incompatible flags, etc.),
    // then reset the write mode
    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
        if (!mshr->isWholeLineWrite()) {
            // if we are currently write coalescing, hold on to the
            // MSHR for as many extra cycles as we need to completely
            // write a cache line
            if (writeAllocator->delay(mshr->blkAddr)) {
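                // Worked example (numbers illustrative): with a 64-byte
                // block and 8-byte writes, the delay below is
                // 64 / 8 = 8 clock periods, i.e. long enough for eight
                // back-to-back writes to finish filling the line. Note
                // that the integer division assumes the write size
                // divides the block size.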
                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
                        "for write coalescing\n", tgt_pkt->print(), delay);
                mshrQueue.delay(mshr, delay);
                return false;
            } else {
                writeAllocator->reset();
            }
        } else {
            writeAllocator->resetDelay(mshr->blkAddr);
        }
    }

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
                                     mshr->isWholeLineWrite());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry; delete the packet and create a
        // new one when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered, a WriteClean
            // will update any copies on the path to memory up to
            // the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}
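
// Note on the return convention shared by sendMSHRQueuePacket() above and
// sendWriteQueuePacket() below: true means the downstream port refused
// the packet and we are now waiting for a retry; false means the packet
// was accepted and the entry has been marked in service.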

bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}

void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly " \
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}

void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}
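
// Hedged usage note (assuming the standard gem5 Python checkpointing
// flow): dirty data is normally flushed before serialize() runs, e.g. by
// calling m5.memWriteback(root) followed by m5.checkpoint(dir) from the
// configuration script, which ends up invoking the memWriteback() method
// defined above on every cache. The bad_checkpoint path only triggers
// when that step was skipped.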

BaseCache::CacheCmdStats::CacheCmdStats(BaseCache &c,
                                        const std::string &name)
    : Stats::Group(&c), cache(c),

      hits(
          this, (name + "_hits").c_str(),
          ("number of " + name + " hits").c_str()),
      misses(
          this, (name + "_misses").c_str(),
          ("number of " + name + " misses").c_str()),
      missLatency(
          this, (name + "_miss_latency").c_str(),
          ("number of " + name + " miss cycles").c_str()),
      accesses(
          this, (name + "_accesses").c_str(),
          ("number of " + name + " accesses (hits + misses)").c_str()),
      missRate(
          this, (name + "_miss_rate").c_str(),
          ("miss rate for " + name + " accesses").c_str()),
      avgMissLatency(
          this, (name + "_avg_miss_latency").c_str(),
          ("average " + name + " miss latency").c_str()),
      mshr_hits(
          this, (name + "_mshr_hits").c_str(),
          ("number of " + name + " MSHR hits").c_str()),
      mshr_misses(
          this, (name + "_mshr_misses").c_str(),
          ("number of " + name + " MSHR misses").c_str()),
      mshr_uncacheable(
          this, (name + "_mshr_uncacheable").c_str(),
          ("number of " + name + " MSHR uncacheable").c_str()),
      mshr_miss_latency(
          this, (name + "_mshr_miss_latency").c_str(),
          ("number of " + name + " MSHR miss cycles").c_str()),
      mshr_uncacheable_lat(
          this, (name + "_mshr_uncacheable_latency").c_str(),
          ("number of " + name + " MSHR uncacheable cycles").c_str()),
      mshrMissRate(
          this, (name + "_mshr_miss_rate").c_str(),
          ("mshr miss rate for " + name + " accesses").c_str()),
      avgMshrMissLatency(
          this, (name + "_avg_mshr_miss_latency").c_str(),
          ("average " + name + " mshr miss latency").c_str()),
      avgMshrUncacheableLatency(
          this, (name + "_avg_mshr_uncacheable_latency").c_str(),
          ("average " + name + " mshr uncacheable latency").c_str())
{
}

void
BaseCache::CacheCmdStats::regStatsFromParent()
{
    using namespace Stats;

    Stats::Group::regStats();
    System *system = cache.system;
    const auto max_masters = system->maxMasters();

    hits
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        hits.subname(i, system->getMasterName(i));
    }

    // Miss statistics
    misses
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        misses.subname(i, system->getMasterName(i));
    }

    // Miss latency statistics
    missLatency
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        missLatency.subname(i, system->getMasterName(i));
    }

    // access formulas
    accesses.flags(total | nozero | nonan);
    accesses = hits + misses;
    for (int i = 0; i < max_masters; i++) {
        accesses.subname(i, system->getMasterName(i));
    }

    // miss rate formulas
    missRate.flags(total | nozero | nonan);
    missRate = misses / accesses;
    for (int i = 0; i < max_masters; i++) {
        missRate.subname(i, system->getMasterName(i));
    }

    // miss latency formulas
    avgMissLatency.flags(total | nozero | nonan);
    avgMissLatency = missLatency / misses;
    for (int i = 0; i < max_masters; i++) {
        avgMissLatency.subname(i, system->getMasterName(i));
    }

    // MSHR statistics
    // MSHR hit statistics
    mshr_hits
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_hits.subname(i, system->getMasterName(i));
    }

    // MSHR miss statistics
    mshr_misses
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_misses.subname(i, system->getMasterName(i));
    }

    // MSHR miss latency statistics
    mshr_miss_latency
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_miss_latency.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable statistics
    mshr_uncacheable
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_uncacheable.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable latency statistics
    mshr_uncacheable_lat
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_uncacheable_lat.subname(i, system->getMasterName(i));
    }

    // MSHR miss rate formulas
    mshrMissRate.flags(total | nozero | nonan);
    mshrMissRate = mshr_misses / accesses;

    for (int i = 0; i < max_masters; i++) {
        mshrMissRate.subname(i, system->getMasterName(i));
    }

    // MSHR miss latency formulas
    avgMshrMissLatency.flags(total | nozero | nonan);
    avgMshrMissLatency = mshr_miss_latency / mshr_misses;
    for (int i = 0; i < max_masters; i++) {
        avgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable latency formulas
    avgMshrUncacheableLatency.flags(total | nozero | nonan);
    avgMshrUncacheableLatency = mshr_uncacheable_lat / mshr_uncacheable;
    for (int i = 0; i < max_masters; i++) {
        avgMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }
}
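
// Note on the formula statistics above: Stats::Formula members such as
// missRate are evaluated lazily at stats-dump time from their operand
// vectors, which is why the per-master subnames are attached to both the
// operands and the formulas. A minimal sketch of the same idiom (the
// hitRate name is illustrative, not a stat in this file):
//
//     hitRate.flags(total | nozero | nonan);
//     hitRate = hits / accesses;      // element-wise across masters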

BaseCache::CacheStats::CacheStats(BaseCache &c)
    : Stats::Group(&c), cache(c),

      demandHits(this, "demand_hits", "number of demand (read+write) hits"),

      overallHits(this, "overall_hits", "number of overall hits"),
      demandMisses(this, "demand_misses",
                   "number of demand (read+write) misses"),
      overallMisses(this, "overall_misses", "number of overall misses"),
      demandMissLatency(this, "demand_miss_latency",
                        "number of demand (read+write) miss cycles"),
      overallMissLatency(this, "overall_miss_latency",
                         "number of overall miss cycles"),
      demandAccesses(this, "demand_accesses",
                     "number of demand (read+write) accesses"),
      overallAccesses(this, "overall_accesses",
                      "number of overall (read+write) accesses"),
      demandMissRate(this, "demand_miss_rate",
                     "miss rate for demand accesses"),
      overallMissRate(this, "overall_miss_rate",
                      "miss rate for overall accesses"),
      demandAvgMissLatency(this, "demand_avg_miss_latency",
                           "average demand miss latency"),
      overallAvgMissLatency(this, "overall_avg_miss_latency",
                            "average overall miss latency"),
      blocked_cycles(this, "blocked_cycles",
                     "number of cycles access was blocked"),
      blocked_causes(this, "blocked",
                     "number of times access was blocked"),
      avg_blocked(this, "avg_blocked_cycles",
                  "average number of cycles each access was blocked"),
      unusedPrefetches(this, "unused_prefetches",
                       "number of HardPF blocks evicted w/o reference"),
      writebacks(this, "writebacks", "number of writebacks"),
      demandMshrHits(this, "demand_mshr_hits",
                     "number of demand (read+write) MSHR hits"),
      overallMshrHits(this, "overall_mshr_hits",
                      "number of overall MSHR hits"),
      demandMshrMisses(this, "demand_mshr_misses",
                       "number of demand (read+write) MSHR misses"),
      overallMshrMisses(this, "overall_mshr_misses",
                        "number of overall MSHR misses"),
      overallMshrUncacheable(this, "overall_mshr_uncacheable_misses",
                             "number of overall MSHR uncacheable misses"),
      demandMshrMissLatency(this, "demand_mshr_miss_latency",
                            "number of demand (read+write) MSHR miss cycles"),
      overallMshrMissLatency(this, "overall_mshr_miss_latency",
                             "number of overall MSHR miss cycles"),
      overallMshrUncacheableLatency(this, "overall_mshr_uncacheable_latency",
                                    "number of overall MSHR uncacheable cycles"),
      demandMshrMissRate(this, "demand_mshr_miss_rate",
                         "mshr miss rate for demand accesses"),
      overallMshrMissRate(this, "overall_mshr_miss_rate",
                          "mshr miss rate for overall accesses"),
      demandAvgMshrMissLatency(this, "demand_avg_mshr_miss_latency",
                               "average demand mshr miss latency"),
      overallAvgMshrMissLatency(this, "overall_avg_mshr_miss_latency",
                                "average overall mshr miss latency"),
      overallAvgMshrUncacheableLatency(
          this, "overall_avg_mshr_uncacheable_latency",
          "average overall mshr uncacheable latency"),
      replacements(this, "replacements", "number of replacements"),

      dataExpansions(this, "data_expansions", "number of data expansions"),
      cmd(MemCmd::NUM_MEM_CMDS)
{
    for (int idx = 0; idx < MemCmd::NUM_MEM_CMDS; ++idx)
        cmd[idx].reset(new CacheCmdStats(c, MemCmd(idx).toString()));
}

void
BaseCache::CacheStats::regStats()
{
    using namespace Stats;

    Stats::Group::regStats();

    System *system = cache.system;
    const auto max_masters = system->maxMasters();

    for (auto &cs : cmd)
        cs->regStatsFromParent();

    // These macros make it easier to sum the right subset of commands and
    // to change the subset of commands that are considered "demand" vs
    // "non-demand"
#define SUM_DEMAND(s)                                                   \
    (cmd[MemCmd::ReadReq]->s + cmd[MemCmd::WriteReq]->s +               \
     cmd[MemCmd::WriteLineReq]->s + cmd[MemCmd::ReadExReq]->s +         \
     cmd[MemCmd::ReadCleanReq]->s + cmd[MemCmd::ReadSharedReq]->s)

    // should writebacks be included here? prior code was inconsistent...
#define SUM_NON_DEMAND(s)                                               \
    (cmd[MemCmd::SoftPFReq]->s + cmd[MemCmd::HardPFReq]->s +            \
     cmd[MemCmd::SoftPFExReq]->s)
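
    // For example, SUM_DEMAND(hits) expands to the element-wise sum of
    // the per-command hit vectors for ReadReq, WriteReq, WriteLineReq,
    // ReadExReq, ReadCleanReq and ReadSharedReq; the prefetch commands
    // are added on top by SUM_NON_DEMAND to form the "overall" stats.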

    demandHits.flags(total | nozero | nonan);
    demandHits = SUM_DEMAND(hits);
    for (int i = 0; i < max_masters; i++) {
        demandHits.subname(i, system->getMasterName(i));
    }

    overallHits.flags(total | nozero | nonan);
    overallHits = demandHits + SUM_NON_DEMAND(hits);
    for (int i = 0; i < max_masters; i++) {
        overallHits.subname(i, system->getMasterName(i));
    }

    demandMisses.flags(total | nozero | nonan);
    demandMisses = SUM_DEMAND(misses);
    for (int i = 0; i < max_masters; i++) {
        demandMisses.subname(i, system->getMasterName(i));
    }

    overallMisses.flags(total | nozero | nonan);
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
    for (int i = 0; i < max_masters; i++) {
        overallMisses.subname(i, system->getMasterName(i));
    }

    demandMissLatency.flags(total | nozero | nonan);
    demandMissLatency = SUM_DEMAND(missLatency);
    for (int i = 0; i < max_masters; i++) {
        demandMissLatency.subname(i, system->getMasterName(i));
    }

    overallMissLatency.flags(total | nozero | nonan);
    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
    for (int i = 0; i < max_masters; i++) {
        overallMissLatency.subname(i, system->getMasterName(i));
    }

    demandAccesses.flags(total | nozero | nonan);
    demandAccesses = demandHits + demandMisses;
    for (int i = 0; i < max_masters; i++) {
        demandAccesses.subname(i, system->getMasterName(i));
    }

    overallAccesses.flags(total | nozero | nonan);
    overallAccesses = overallHits + overallMisses;
    for (int i = 0; i < max_masters; i++) {
        overallAccesses.subname(i, system->getMasterName(i));
    }

    demandMissRate.flags(total | nozero | nonan);
    demandMissRate = demandMisses / demandAccesses;
    for (int i = 0; i < max_masters; i++) {
        demandMissRate.subname(i, system->getMasterName(i));
    }

    overallMissRate.flags(total | nozero | nonan);
    overallMissRate = overallMisses / overallAccesses;
    for (int i = 0; i < max_masters; i++) {
        overallMissRate.subname(i, system->getMasterName(i));
    }

    demandAvgMissLatency.flags(total | nozero | nonan);
    demandAvgMissLatency = demandMissLatency / demandMisses;
    for (int i = 0; i < max_masters; i++) {
        demandAvgMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMissLatency.flags(total | nozero | nonan);
    overallAvgMissLatency = overallMissLatency / overallMisses;
    for (int i = 0; i < max_masters; i++) {
        overallAvgMissLatency.subname(i, system->getMasterName(i));
    }

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;
    avg_blocked = blocked_cycles / blocked_causes;

    unusedPrefetches.flags(nozero);

    writebacks
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        writebacks.subname(i, system->getMasterName(i));
    }

    demandMshrHits.flags(total | nozero | nonan);
    demandMshrHits = SUM_DEMAND(mshr_hits);
    for (int i = 0; i < max_masters; i++) {
        demandMshrHits.subname(i, system->getMasterName(i));
    }

    overallMshrHits.flags(total | nozero | nonan);
    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
    for (int i = 0; i < max_masters; i++) {
        overallMshrHits.subname(i, system->getMasterName(i));
    }

    demandMshrMisses.flags(total | nozero | nonan);
    demandMshrMisses = SUM_DEMAND(mshr_misses);
    for (int i = 0; i < max_masters; i++) {
        demandMshrMisses.subname(i, system->getMasterName(i));
    }

    overallMshrMisses.flags(total | nozero | nonan);
    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
    for (int i = 0; i < max_masters; i++) {
        overallMshrMisses.subname(i, system->getMasterName(i));
    }

    demandMshrMissLatency.flags(total | nozero | nonan);
    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
    for (int i = 0; i < max_masters; i++) {
        demandMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallMshrMissLatency.flags(total | nozero | nonan);
    overallMshrMissLatency =
        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
    for (int i = 0; i < max_masters; i++) {
        overallMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallMshrUncacheable.flags(total | nozero | nonan);
    overallMshrUncacheable =
        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
    for (int i = 0; i < max_masters; i++) {
        overallMshrUncacheable.subname(i, system->getMasterName(i));
    }

    overallMshrUncacheableLatency.flags(total | nozero | nonan);
    overallMshrUncacheableLatency =
        SUM_DEMAND(mshr_uncacheable_lat) +
        SUM_NON_DEMAND(mshr_uncacheable_lat);
    for (int i = 0; i < max_masters; i++) {
        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

    demandMshrMissRate.flags(total | nozero | nonan);
    demandMshrMissRate = demandMshrMisses / demandAccesses;
    for (int i = 0; i < max_masters; i++) {
        demandMshrMissRate.subname(i, system->getMasterName(i));
    }

    overallMshrMissRate.flags(total | nozero | nonan);
    overallMshrMissRate = overallMshrMisses / overallAccesses;
    for (int i = 0; i < max_masters; i++) {
        overallMshrMissRate.subname(i, system->getMasterName(i));
    }

    demandAvgMshrMissLatency.flags(total | nozero | nonan);
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
    for (int i = 0; i < max_masters; i++) {
        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMshrMissLatency.flags(total | nozero | nonan);
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
    for (int i = 0; i < max_masters; i++) {
        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMshrUncacheableLatency.flags(total | nozero | nonan);
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;
    for (int i = 0; i < max_masters; i++) {
        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

    dataExpansions.flags(nozero | nonan);
}

void
BaseCache::regProbePoints()
{
    ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
    ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
    ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
}
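
// These probe points are notified elsewhere in the cache on hits, misses
// and fills; external components (e.g. prefetchers) attach listeners to
// them by name. A hedged sketch of attaching a listener (MyListener and
// its notify method are illustrative only):
//
//     ProbeListenerArg<MyListener, PacketPtr> *listener =
//         new ProbeListenerArg<MyListener, PacketPtr>(
//             this, "Miss", &MyListener::notify);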

///////////////
//
// CpuSidePort
//
///////////////
bool
BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    assert(pkt->isResponse());

    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}


bool
BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
{
    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
        // always let express snoop packets through even if blocked
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        mustSendRetry = true;
        return false;
    }
    mustSendRetry = false;
    return true;
}

bool
BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(pkt->isRequest());

    if (cache->system->bypassCaches()) {
        // Just forward the packet if caches are disabled.
        // @todo This should really enqueue the packet rather than
        // forwarding it immediately
        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
        assert(success);
        return true;
    } else if (tryTiming(pkt)) {
        cache->recvTimingReq(pkt);
        return true;
    }
    return false;
}

Tick
BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // Forward the request if the system is in cache bypass mode.
        return cache->memSidePort.sendAtomic(pkt);
    } else {
        return cache->recvAtomic(pkt);
    }
}

void
BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}


BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

///////////////
//
// MemSidePort
//
///////////////
bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->getTarget()->pkt)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready; note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                    BaseCache *_cache,
                                    const std::string &_label)
    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
{
}

void
WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
                           Addr blk_addr)
{
    // check if we are continuing where the last write ended
    if (nextAddr == write_addr) {
        delayCtr[blk_addr] = delayThreshold;
        // stop if we have already saturated
        if (mode != WriteMode::NO_ALLOCATE) {
            byteCount += write_size;
            // switch to write coalescing if we have passed the lower
            // threshold
            if (mode == WriteMode::ALLOCATE &&
                byteCount > coalesceLimit) {
                mode = WriteMode::COALESCE;
                DPRINTF(Cache, "Switched to write coalescing\n");
            } else if (mode == WriteMode::COALESCE &&
                       byteCount > noAllocateLimit) {
                // and switch to non-allocating mode if we also pass
                // the upper threshold
                mode = WriteMode::NO_ALLOCATE;
                DPRINTF(Cache, "Switched to write-no-allocate\n");
            }
        }
    } else {
        // we did not see a write matching the previous one, start
        // over again
        byteCount = write_size;
        mode = WriteMode::ALLOCATE;
        resetDelay(blk_addr);
    }
    nextAddr = write_addr + write_size;
}
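
// Worked example of the mode transitions above (the concrete limits are
// illustrative; the real values come from the WriteAllocator parameters):
// with coalesceLimit = 128 bytes and noAllocateLimit = 256 bytes, a
// stream of sequential 64-byte writes evolves as
//
//     byteCount:  64        128       192       256       320
//     mode:       ALLOCATE  ALLOCATE  COALESCE  COALESCE  NO_ALLOCATE
//
// and any non-sequential write resets byteCount and drops back to
// ALLOCATE.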

WriteAllocator*
WriteAllocatorParams::create()
{
    return new WriteAllocator(this);
}