/*
 * Copyright (c) 2012-2013, 2018-2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */
#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CacheComp.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/compressors/base.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "mem/cache/tags/super_blk.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"
BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}
BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : ClockedObject(p),
      cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      tags(p->tags),
      compressor(p->compressor),
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      blkSize(blk_size),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
      system(p->system),
      stats(*this)
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    if (prefetcher)
        prefetcher->setCache(this);
}
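
// Illustrative sizing note (values hypothetical, not from any shipped
// config): with p->mshrs = 4 and p->write_buffers = 8, the write buffer
// above keeps 4 reserve entries in addition to its regular ones, so a
// writeback eventually produced by any of the 4 outstanding MSHRs can
// always be accepted, even though allocation never checks the write
// buffer before committing to an MSHR.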
BaseCache::~BaseCache()
{
    delete tempBlock;
}
void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}
void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}
void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetry();
}
Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}
void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}
Port &
BaseCache::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return ClockedObject::getPort(if_name, idx);
    }
}
bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}
void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}
void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// MSHR hit
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                stats.cmdStats(pkt).mshr_hits[pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}
void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;
    {
        PacketList writebacks;
        // Note that lat is passed by reference here. The function
        // access() will set the lat value.
        satisfied = access(pkt, blk, lat, writebacks);

        // After the evicted blocks are selected, they must be forwarded
        // to the write buffer to ensure they logically precede anything
        // happening below
        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
    }

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might turn
        // the packet into a response
        ppHit->notify(pkt);

        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);

        ppMiss->notify(pkt);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}
void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}
void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    const QueueEntry::Target *initial_tgt = mshr->getTarget();
    const Tick miss_latency = curTick() - initial_tgt->recvTime;
    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        stats.cmdStats(initial_tgt->pkt)
            .mshr_uncacheable_lat[pkt->req->masterId()] += miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        stats.cmdStats(initial_tgt->pkt)
            .mshr_miss_latency[pkt->req->masterId()] += miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
        ppFill->notify(pkt);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        if (blk) {
            blk->status &= ~BlkReadable;
        }
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}
Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // of the cache access
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies to the path to the memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
        pkt->setSatisfied();
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}
void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt) ||
        writeBuffer.trySatisfyFunctional(pkt) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}
void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
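
// Worked example (hypothetical values): a 4-byte conditional swap whose
// req->getExtraData() is 0x1234 compares the 4 bytes currently held at
// blk_data against 0x1234; only on a match is overwrite_val written into
// the block and the block marked dirty. In all cases the packet carries
// the old memory value back, via pkt->setData(blk_data) above.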
QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead? I.e. do we
            // have to flush writes in order? I don't think so... not
            // for Alpha anyway. Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}
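
// To summarise the arbitration above: a ready write-buffer entry wins only
// when the write buffer is full or no miss is ready; otherwise misses are
// favoured. In both directions an older conflicting entry in the opposite
// queue (matched by block address) is serviced first to preserve ordering,
// and a prefetch is attempted only when both queues are empty and an MSHR
// slot is available.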
bool
BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
                                 PacketList &writebacks)
{
    // tempBlock does not exist in the tags, so don't do anything for it.
    if (blk == tempBlock) {
        return true;
    }

    // Get superblock of the given block
    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
    const SuperBlk* superblock = static_cast<const SuperBlk*>(
        compression_blk->getSectorBlock());

    // The compressor is called to compress the updated data, so that its
    // metadata can be updated.
    std::size_t compression_size = 0;
    Cycles compression_lat = Cycles(0);
    Cycles decompression_lat = Cycles(0);
    compressor->compress(data, compression_lat, decompression_lat,
                         compression_size);

    // If block's compression factor increased, it may not be co-allocatable
    // anymore. If so, some blocks might need to be evicted to make room for
    // the bigger block

    // Get previous compressed size
    const std::size_t M5_VAR_USED prev_size = compression_blk->getSizeBits();

    // Check if new data is co-allocatable
    const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
        superblock->canCoAllocate(compression_size);

    // If block was compressed, possibly co-allocated with other blocks, and
    // cannot be co-allocated anymore, one or more blocks must be evicted to
    // make room for the expanded block. As of now we decide to evict the co-
    // allocated blocks to make room for the expansion, but other approaches
    // that take the replacement data of the superblock into account may
    // generate better results
    std::vector<CacheBlk*> evict_blks;
    const bool was_compressed = compression_blk->isCompressed();
    if (was_compressed && !is_co_allocatable) {
        // Get all co-allocated blocks
        for (const auto& sub_blk : superblock->blks) {
            if (sub_blk->isValid() && (compression_blk != sub_blk)) {
                // Check for transient state allocations. If any of the
                // entries listed for eviction has a transient state, the
                // allocation fails
                const Addr repl_addr = regenerateBlkAddr(sub_blk);
                const MSHR *repl_mshr =
                    mshrQueue.findMatch(repl_addr, sub_blk->isSecure());
                if (repl_mshr) {
                    DPRINTF(CacheRepl, "Aborting data expansion of %s due " \
                            "to replacement of block in transient state: %s\n",
                            compression_blk->print(), sub_blk->print());
                    // Too hard to replace block with transient state, so it
                    // cannot be evicted. Mark the update as failed and expect
                    // the caller to evict this block. Since this is called
                    // only when writebacks arrive, and packets do not contain
                    // compressed data, there is no need to decompress
                    compression_blk->setSizeBits(blkSize * 8);
                    compression_blk->setDecompressionLatency(Cycles(0));
                    compression_blk->setUncompressed();
                    return false;
                }

                evict_blks.push_back(sub_blk);
            }
        }

        // Update the number of data expansions
        stats.dataExpansions++;

        DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
                "\n", blk->print(), prev_size, compression_size);
    }

    // We always store compressed blocks when possible
    if (is_co_allocatable) {
        compression_blk->setCompressed();
    } else {
        compression_blk->setUncompressed();
    }
    compression_blk->setSizeBits(compression_size);
    compression_blk->setDecompressionLatency(decompression_lat);

    // Evict valid blocks
    for (const auto& evict_blk : evict_blks) {
        if (evict_blk->isValid()) {
            evictBlock(evict_blk, writebacks);
        }
    }

    return true;
}
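
// Illustrative scenario (hypothetical sizes): a block previously compressed
// to 128 bits shares a superblock with a co-allocated sibling. If a
// writeback rewrites its data so that it now compresses to 384 bits and no
// longer co-allocates, the sibling is evicted and the update proceeds; if
// the sibling has an MSHR in flight, the update fails instead and the
// caller is expected to evict this block.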
void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}
/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

Cycles
BaseCache::calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const
{
    // A tag-only access has to wait for the packet to arrive in order to
    // perform the tag lookup.
    return ticksToCycles(delay) + lookup_lat;
}
Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const
{
    Cycles lat(0);

    if (blk != nullptr) {
        // As soon as the access arrives, for sequential accesses first access
        // tags, then the data entry. In the case of parallel accesses the
        // latency is dictated by the slowest of tag and data latencies.
        if (sequentialAccess) {
            lat = ticksToCycles(delay) + lookup_lat + dataLatency;
        } else {
            lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
        }

        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
        const Tick tick = curTick() + delay;
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > tick &&
            ticksToCycles(when_ready - tick) > lat) {
            lat += ticksToCycles(when_ready - tick);
        }
    } else {
        // In case of a miss, we neglect the data access in a parallel
        // configuration (i.e., the data access will be stopped as soon as
        // we find out it is a miss), and use the tag-only latency.
        lat = calculateTagOnlyLatency(delay, lookup_lat);
    }

    return lat;
}
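
// Worked example (hypothetical numbers): with a header delay worth 1 cycle,
// lookup_lat = 2 cycles and dataLatency = 4 cycles, a hit costs
// 1 + 2 + 4 = 7 cycles with sequentialAccess, but only 1 + max(2, 4) = 5
// cycles with parallel tag/data access. A miss pays the tag-only latency of
// 1 + 2 = 3 cycles in either configuration.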
bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.

        // Calculate access latency on top of when the packet arrives. This
        // takes into account the bus delay.
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible cases can be of a CleanEvict packet coming from above
        // encountering a Writeback generated in an upper-level peer cache
        // and waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent out
        // by crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();

                // A clean evict does not need to access the data array
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
                delete wbPkt;
            }
        }
    }

    // The critical latency part of a write depends only on the tag access
    if (pkt->isWrite()) {
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
    }

    // Writeback handling is special case. We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());

            // A writeback searches for the block, then writes the data.
            // As the writeback is being dropped, the data is not touched,
            // and we just had to wait for the time to find a match in the
            // MSHR. As of now assume a mshr queue search takes as long as
            // a tag lookup for simplicity.
            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                incMissCount(pkt);
                return false;
            }

            blk->status |= BlkReadable;
        } else if (compressor) {
            // This is an overwrite to an existing block, therefore we need
            // to check for data expansion (i.e., block was compressed with
            // a smaller size, and now it doesn't fit the entry anymore).
            // If that is the case we might need to evict blocks.
            if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
                                       writebacks)) {
                invalidateBlock(blk);
                return false;
            }
        }

        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
        incHitCount(pkt);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        // A CleanEvict does not need to access the data array
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    incMissCount(pkt);
                    return false;
                }

                blk->status |= BlkReadable;
            }
        } else if (compressor) {
            // This is an overwrite to an existing block, therefore we need
            // to check for data expansion (i.e., block was compressed with
            // a smaller size, and now it doesn't fit the entry anymore).
            // If that is the case we might need to evict blocks.
            if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
                                       writebacks)) {
                invalidateBlock(blk);
                return false;
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        assert(blk);
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        incHitCount(pkt);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        // If this a write-through packet it will be sent to cache below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access
        incHitCount(pkt);

        // Calculate access latency based on the need to access the data array
        if (pkt->isRead()) {
            lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

            // When a block is compressed, it must first be decompressed
            // before being read. This adds to the access latency.
            if (compressor) {
                lat += compressor->getDecompressionLatency(blk);
            }
        } else {
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
        }

        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    incMissCount(pkt);

    lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}
void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}
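
// Example of the policy (behaviour inferred from the check above): a
// lower-level cache configured with clusivity == mostly_excl that has just
// supplied a clean block to the cache above drops its own copy,
// approximating an exclusive hierarchy; dirty blocks are kept so this cache
// remains the owner until the data is written back.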
CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();

    // Note the old state (if any) for the dprintf below
    CacheBlk::State old_state = blk ? blk->status : 0;

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }
    } else {
        // existing block... probably an upgrade
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    // Block is guaranteed to be valid at this point
    assert(blk->isValid());
    assert(blk->isSecure() == is_secure);
    assert(regenerateBlkAddr(blk) == addr);

    blk->status |= BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owners copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // The block will be ready when the payload arrives and the fill is done
    blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                      pkt->payloadDelay);

    return blk;
}
CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    // Get address
    const Addr addr = pkt->getAddr();

    // Get secure bit
    const bool is_secure = pkt->isSecure();

    // Block size and compression related access latency. Only relevant if
    // using a compressor, otherwise there is no extra delay, and the block
    // is not compressed.
    std::size_t blk_size_bits = blkSize*8;
    Cycles compression_lat = Cycles(0);
    Cycles decompression_lat = Cycles(0);

    // If a compressor is being used, it is called to compress data before
    // insertion. Although in Gem5 the data is stored uncompressed, even if a
    // compressor is used, the compression/decompression methods are called to
    // calculate the amount of extra cycles needed to read or write compressed
    // blocks.
    if (compressor && pkt->hasData()) {
        compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat,
                             decompression_lat, blk_size_bits);
    }

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, blk_size_bits,
                                        evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    bool replacement = false;
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            replacement = true;

            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (replacement) {
        // Evict valid blocks associated to this victim block
        for (const auto& blk : evict_blks) {
            if (blk->isValid()) {
                DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \
                        "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk),
                        addr, is_secure);

                evictBlock(blk, writebacks);
            }
        }

        stats.replacements++;
    }

    // If using a compressor, set compression data. This must be done before
    // block insertion, as compressed tags use this information.
    if (compressor) {
        compressor->setSizeBits(victim, blk_size_bits);
        compressor->setDecompressionLatency(victim, decompression_lat);
    }

    // Insert new block at victimized entry
    tags->insertBlock(pkt, victim);

    return victim;
}
void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    // If block is still marked as prefetched, then it hasn't been used
    if (blk->wasPrefetched()) {
        stats.unusedPrefetches++;
    }

    // If handling a block present in the Tags, let it do its invalidation
    // process, which will update stats and invalidate the block itself
    if (blk != tempBlock) {
        tags->invalidate(blk);
    } else {
        tempBlock->invalidate();
    }
}
void
BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}
PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    stats.writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    // When a block is compressed, it must first be decompressed before being
    // sent for writeback.
    if (compressor) {
        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
    }

    return pkt;
}
PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->allocate();
    pkt->setDataFromBlock(blk->data, blkSize);

    // When a block is compressed, it must first be decompressed before being
    // sent for writeback.
    if (compressor) {
        pkt->payloadDelay = compressor->getDecompressionLatency(blk);
    }

    return pkt;
}
void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}
void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}
void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}
Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}
bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    assert(mshr);

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    // if the cache is in write coalescing mode or (additionally) in
    // no allocation mode, and we have a write packet with an MSHR
    // that is not a whole-line write (due to incompatible flags etc),
    // then reset the write mode
    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
        if (!mshr->isWholeLineWrite()) {
            // if we are currently write coalescing, hold on the
            // MSHR as many cycles extra as we need to completely
            // write a cache line
            if (writeAllocator->delay(mshr->blkAddr)) {
                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
                        "for write coalescing\n", tgt_pkt->print(), delay);
                mshrQueue.delay(mshr, delay);
                return false;
            } else {
                writeAllocator->reset();
            }
        } else {
            writeAllocator->resetDelay(mshr->blkAddr);
        }
    }

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
                                     mshr->isWholeLineWrite());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies to the path to the memory
            // until the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}
bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    assert(wq_entry);

    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}
void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly " \
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}
void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}
BaseCache::CacheCmdStats::CacheCmdStats(BaseCache &c,
                                        const std::string &name)
    : Stats::Group(&c), cache(c),
      hits(this, (name + "_hits").c_str(),
           ("number of " + name + " hits").c_str()),
      misses(this, (name + "_misses").c_str(),
             ("number of " + name + " misses").c_str()),
      missLatency(this, (name + "_miss_latency").c_str(),
                  ("number of " + name + " miss cycles").c_str()),
      accesses(this, (name + "_accesses").c_str(),
               ("number of " + name + " accesses(hits+misses)").c_str()),
      missRate(this, (name + "_miss_rate").c_str(),
               ("miss rate for " + name + " accesses").c_str()),
      avgMissLatency(this, (name + "_avg_miss_latency").c_str(),
                     ("average " + name + " miss latency").c_str()),
      mshr_hits(this, (name + "_mshr_hits").c_str(),
                ("number of " + name + " MSHR hits").c_str()),
      mshr_misses(this, (name + "_mshr_misses").c_str(),
                  ("number of " + name + " MSHR misses").c_str()),
      mshr_uncacheable(this, (name + "_mshr_uncacheable").c_str(),
                       ("number of " + name + " MSHR uncacheable").c_str()),
      mshr_miss_latency(this, (name + "_mshr_miss_latency").c_str(),
                        ("number of " + name + " MSHR miss cycles").c_str()),
      mshr_uncacheable_lat(
          this, (name + "_mshr_uncacheable_latency").c_str(),
          ("number of " + name + " MSHR uncacheable cycles").c_str()),
      mshrMissRate(this, (name + "_mshr_miss_rate").c_str(),
                   ("mshr miss rate for " + name + " accesses").c_str()),
      avgMshrMissLatency(this, (name + "_avg_mshr_miss_latency").c_str(),
                         ("average " + name + " mshr miss latency").c_str()),
      avgMshrUncacheableLatency(
          this, (name + "_avg_mshr_uncacheable_latency").c_str(),
          ("average " + name + " mshr uncacheable latency").c_str())
{
}
void
BaseCache::CacheCmdStats::regStatsFromParent()
{
    using namespace Stats;

    Stats::Group::regStats();
    System *system = cache.system;
    const auto max_masters = system->maxMasters();

    hits
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        hits.subname(i, system->getMasterName(i));
    }

    // Miss statistics
    misses
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        misses.subname(i, system->getMasterName(i));
    }

    // Miss latency statistics
    missLatency
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        missLatency.subname(i, system->getMasterName(i));
    }

    // access formulas
    accesses.flags(total | nozero | nonan);
    accesses = hits + misses;
    for (int i = 0; i < max_masters; i++) {
        accesses.subname(i, system->getMasterName(i));
    }

    // miss rate formulas
    missRate.flags(total | nozero | nonan);
    missRate = misses / accesses;
    for (int i = 0; i < max_masters; i++) {
        missRate.subname(i, system->getMasterName(i));
    }

    // miss latency formulas
    avgMissLatency.flags(total | nozero | nonan);
    avgMissLatency = missLatency / misses;
    for (int i = 0; i < max_masters; i++) {
        avgMissLatency.subname(i, system->getMasterName(i));
    }

    // MSHR hit statistics
    mshr_hits
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_hits.subname(i, system->getMasterName(i));
    }

    // MSHR miss statistics
    mshr_misses
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_misses.subname(i, system->getMasterName(i));
    }

    // MSHR miss latency statistics
    mshr_miss_latency
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_miss_latency.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable statistics
    mshr_uncacheable
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_uncacheable.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable latency statistics
    mshr_uncacheable_lat
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        mshr_uncacheable_lat.subname(i, system->getMasterName(i));
    }

    // MSHR miss rate formulas
    mshrMissRate.flags(total | nozero | nonan);
    mshrMissRate = mshr_misses / accesses;
    for (int i = 0; i < max_masters; i++) {
        mshrMissRate.subname(i, system->getMasterName(i));
    }

    // mshrMiss latency formulas
    avgMshrMissLatency.flags(total | nozero | nonan);
    avgMshrMissLatency = mshr_miss_latency / mshr_misses;
    for (int i = 0; i < max_masters; i++) {
        avgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // mshrUncacheable latency formulas
    avgMshrUncacheableLatency.flags(total | nozero | nonan);
    avgMshrUncacheableLatency = mshr_uncacheable_lat / mshr_uncacheable;
    for (int i = 0; i < max_masters; i++) {
        avgMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }
}
BaseCache::CacheStats::CacheStats(BaseCache &c)
    : Stats::Group(&c), cache(c),

      demandHits(this, "demand_hits", "number of demand (read+write) hits"),
      overallHits(this, "overall_hits", "number of overall hits"),
      demandMisses(this, "demand_misses",
                   "number of demand (read+write) misses"),
      overallMisses(this, "overall_misses", "number of overall misses"),
      demandMissLatency(this, "demand_miss_latency",
                        "number of demand (read+write) miss cycles"),
      overallMissLatency(this, "overall_miss_latency",
                         "number of overall miss cycles"),
      demandAccesses(this, "demand_accesses",
                     "number of demand (read+write) accesses"),
      overallAccesses(this, "overall_accesses",
                      "number of overall (read+write) accesses"),
      demandMissRate(this, "demand_miss_rate",
                     "miss rate for demand accesses"),
      overallMissRate(this, "overall_miss_rate",
                      "miss rate for overall accesses"),
      demandAvgMissLatency(this, "demand_avg_miss_latency",
                           "average demand miss latency"),
      overallAvgMissLatency(this, "overall_avg_miss_latency",
                            "average overall miss latency"),
      blocked_cycles(this, "blocked_cycles",
                     "number of cycles access was blocked"),
      blocked_causes(this, "blocked",
                     "number of times access was blocked"),
      avg_blocked(this, "avg_blocked_cycles",
                  "average number of cycles each access was blocked"),
      unusedPrefetches(this, "unused_prefetches",
                       "number of HardPF blocks evicted w/o reference"),
      writebacks(this, "writebacks", "number of writebacks"),
      demandMshrHits(this, "demand_mshr_hits",
                     "number of demand (read+write) MSHR hits"),
      overallMshrHits(this, "overall_mshr_hits",
                      "number of overall MSHR hits"),
      demandMshrMisses(this, "demand_mshr_misses",
                       "number of demand (read+write) MSHR misses"),
      overallMshrMisses(this, "overall_mshr_misses",
                        "number of overall MSHR misses"),
      overallMshrUncacheable(this, "overall_mshr_uncacheable_misses",
                             "number of overall MSHR uncacheable misses"),
      demandMshrMissLatency(this, "demand_mshr_miss_latency",
                            "number of demand (read+write) MSHR miss cycles"),
      overallMshrMissLatency(this, "overall_mshr_miss_latency",
                             "number of overall MSHR miss cycles"),
      overallMshrUncacheableLatency(
          this, "overall_mshr_uncacheable_latency",
          "number of overall MSHR uncacheable cycles"),
      demandMshrMissRate(this, "demand_mshr_miss_rate",
                         "mshr miss rate for demand accesses"),
      overallMshrMissRate(this, "overall_mshr_miss_rate",
                          "mshr miss rate for overall accesses"),
      demandAvgMshrMissLatency(this, "demand_avg_mshr_miss_latency",
                               "average demand mshr miss latency"),
      overallAvgMshrMissLatency(this, "overall_avg_mshr_miss_latency",
                                "average overall mshr miss latency"),
      overallAvgMshrUncacheableLatency(
          this, "overall_avg_mshr_uncacheable_latency",
          "average overall mshr uncacheable latency"),
      replacements(this, "replacements", "number of replacements"),
      dataExpansions(this, "data_expansions", "number of data expansions"),
      cmd(MemCmd::NUM_MEM_CMDS)
{
    for (int idx = 0; idx < MemCmd::NUM_MEM_CMDS; ++idx)
        cmd[idx].reset(new CacheCmdStats(c, MemCmd(idx).toString()));
}
void
BaseCache::CacheStats::regStats()
{
    using namespace Stats;

    Stats::Group::regStats();

    System *system = cache.system;
    const auto max_masters = system->maxMasters();

    for (auto &cs : cmd)
        cs->regStatsFromParent();

    // These macros make it easier to sum the right subset of commands and
    // to change the subset of commands that are considered "demand" vs
    // "non-demand"
#define SUM_DEMAND(s)                                                   \
    (cmd[MemCmd::ReadReq]->s + cmd[MemCmd::WriteReq]->s +               \
     cmd[MemCmd::WriteLineReq]->s + cmd[MemCmd::ReadExReq]->s +         \
     cmd[MemCmd::ReadCleanReq]->s + cmd[MemCmd::ReadSharedReq]->s)

    // should writebacks be included here? prior code was inconsistent...
#define SUM_NON_DEMAND(s)                                               \
    (cmd[MemCmd::SoftPFReq]->s + cmd[MemCmd::HardPFReq]->s +            \
     cmd[MemCmd::SoftPFExReq]->s)
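    // As an illustration, SUM_DEMAND(hits) expands to
    // cmd[MemCmd::ReadReq]->hits + cmd[MemCmd::WriteReq]->hits + ...,
    // i.e. a per-master vector sum over the demand command types, so
    // each formula below stays broken down by requesting master.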
    demandHits.flags(total | nozero | nonan);
    demandHits = SUM_DEMAND(hits);
    for (int i = 0; i < max_masters; i++) {
        demandHits.subname(i, system->getMasterName(i));
    }

    overallHits.flags(total | nozero | nonan);
    overallHits = demandHits + SUM_NON_DEMAND(hits);
    for (int i = 0; i < max_masters; i++) {
        overallHits.subname(i, system->getMasterName(i));
    }

    demandMisses.flags(total | nozero | nonan);
    demandMisses = SUM_DEMAND(misses);
    for (int i = 0; i < max_masters; i++) {
        demandMisses.subname(i, system->getMasterName(i));
    }

    overallMisses.flags(total | nozero | nonan);
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
    for (int i = 0; i < max_masters; i++) {
        overallMisses.subname(i, system->getMasterName(i));
    }

    demandMissLatency.flags(total | nozero | nonan);
    demandMissLatency = SUM_DEMAND(missLatency);
    for (int i = 0; i < max_masters; i++) {
        demandMissLatency.subname(i, system->getMasterName(i));
    }

    overallMissLatency.flags(total | nozero | nonan);
    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
    for (int i = 0; i < max_masters; i++) {
        overallMissLatency.subname(i, system->getMasterName(i));
    }

    demandAccesses.flags(total | nozero | nonan);
    demandAccesses = demandHits + demandMisses;
    for (int i = 0; i < max_masters; i++) {
        demandAccesses.subname(i, system->getMasterName(i));
    }

    overallAccesses.flags(total | nozero | nonan);
    overallAccesses = overallHits + overallMisses;
    for (int i = 0; i < max_masters; i++) {
        overallAccesses.subname(i, system->getMasterName(i));
    }

    // miss rate formulas
    demandMissRate.flags(total | nozero | nonan);
    demandMissRate = demandMisses / demandAccesses;
    for (int i = 0; i < max_masters; i++) {
        demandMissRate.subname(i, system->getMasterName(i));
    }

    overallMissRate.flags(total | nozero | nonan);
    overallMissRate = overallMisses / overallAccesses;
    for (int i = 0; i < max_masters; i++) {
        overallMissRate.subname(i, system->getMasterName(i));
    }

    // miss latency formulas
    demandAvgMissLatency.flags(total | nozero | nonan);
    demandAvgMissLatency = demandMissLatency / demandMisses;
    for (int i = 0; i < max_masters; i++) {
        demandAvgMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMissLatency.flags(total | nozero | nonan);
    overallAvgMissLatency = overallMissLatency / overallMisses;
    for (int i = 0; i < max_masters; i++) {
        overallAvgMissLatency.subname(i, system->getMasterName(i));
    }

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;
    avg_blocked = blocked_cycles / blocked_causes;
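    // avg_blocked is a Stats::Formula, so the division above is not
    // computed here; it is evaluated per blocked cause when the
    // statistics are dumped.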
    unusedPrefetches.flags(nozero);

    writebacks
        .init(max_masters)
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < max_masters; i++) {
        writebacks.subname(i, system->getMasterName(i));
    }

    demandMshrHits.flags(total | nozero | nonan);
    demandMshrHits = SUM_DEMAND(mshr_hits);
    for (int i = 0; i < max_masters; i++) {
        demandMshrHits.subname(i, system->getMasterName(i));
    }

    overallMshrHits.flags(total | nozero | nonan);
    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
    for (int i = 0; i < max_masters; i++) {
        overallMshrHits.subname(i, system->getMasterName(i));
    }

    demandMshrMisses.flags(total | nozero | nonan);
    demandMshrMisses = SUM_DEMAND(mshr_misses);
    for (int i = 0; i < max_masters; i++) {
        demandMshrMisses.subname(i, system->getMasterName(i));
    }

    overallMshrMisses.flags(total | nozero | nonan);
    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
    for (int i = 0; i < max_masters; i++) {
        overallMshrMisses.subname(i, system->getMasterName(i));
    }

    demandMshrMissLatency.flags(total | nozero | nonan);
    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
    for (int i = 0; i < max_masters; i++) {
        demandMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallMshrMissLatency.flags(total | nozero | nonan);
    overallMshrMissLatency =
        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
    for (int i = 0; i < max_masters; i++) {
        overallMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallMshrUncacheable.flags(total | nozero | nonan);
    overallMshrUncacheable =
        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
    for (int i = 0; i < max_masters; i++) {
        overallMshrUncacheable.subname(i, system->getMasterName(i));
    }

    overallMshrUncacheableLatency.flags(total | nozero | nonan);
    overallMshrUncacheableLatency =
        SUM_DEMAND(mshr_uncacheable_lat) +
        SUM_NON_DEMAND(mshr_uncacheable_lat);
    for (int i = 0; i < max_masters; i++) {
        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

    demandMshrMissRate.flags(total | nozero | nonan);
    demandMshrMissRate = demandMshrMisses / demandAccesses;
    for (int i = 0; i < max_masters; i++) {
        demandMshrMissRate.subname(i, system->getMasterName(i));
    }

    overallMshrMissRate.flags(total | nozero | nonan);
    overallMshrMissRate = overallMshrMisses / overallAccesses;
    for (int i = 0; i < max_masters; i++) {
        overallMshrMissRate.subname(i, system->getMasterName(i));
    }

    // mshrMiss latency formulas
    demandAvgMshrMissLatency.flags(total | nozero | nonan);
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
    for (int i = 0; i < max_masters; i++) {
        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMshrMissLatency.flags(total | nozero | nonan);
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
    for (int i = 0; i < max_masters; i++) {
        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // mshrUncacheable latency formulas
    overallAvgMshrUncacheableLatency.flags(total | nozero | nonan);
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;
    for (int i = 0; i < max_masters; i++) {
        overallAvgMshrUncacheableLatency.subname(i,
                                                 system->getMasterName(i));
    }

    dataExpansions.flags(nozero | nonan);
}
void
BaseCache::regProbePoints()
{
    ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
    ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
    ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
}
bool
BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    assert(pkt->isResponse());

    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}
bool
BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
{
    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
        // always let express snoop packets through even if blocked
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        mustSendRetry = true;
        return false;
    }
    mustSendRetry = false;
    return true;
}
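// Note on the retry handshake above: once tryTiming() returns false,
// mustSendRetry stays set; when the cache later unblocks, the port's
// sendRetryEvent runs processSendRetry(), which issues sendRetryReq()
// so the master can re-present the rejected packet.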
bool
BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(pkt->isRequest());

    if (cache->system->bypassCaches()) {
        // Just forward the packet if caches are disabled.
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
        assert(success);
        return true;
    } else if (tryTiming(pkt)) {
        cache->recvTimingReq(pkt);
        return true;
    }
    return false;
}
Tick
BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // Forward the request if the system is in cache bypass mode.
        return cache->memSidePort.sendAtomic(pkt);
    } else {
        return cache->recvAtomic(pkt);
    }
}
void
BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}


BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}
bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}
void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    // sanity check
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->getTarget()->pkt)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready, note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}
BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                    BaseCache *_cache,
                                    const std::string &_label)
    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
{
}
void
WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
                           Addr blk_addr)
{
    // check if we are continuing where the last write ended
    if (nextAddr == write_addr) {
        delayCtr[blk_addr] = delayThreshold;
        // stop if we have already saturated
        if (mode != WriteMode::NO_ALLOCATE) {
            byteCount += write_size;
            // switch to streaming mode if we have passed the lower
            // threshold
            if (mode == WriteMode::ALLOCATE &&
                byteCount > coalesceLimit) {
                mode = WriteMode::COALESCE;
                DPRINTF(Cache, "Switched to write coalescing\n");
            } else if (mode == WriteMode::COALESCE &&
                       byteCount > noAllocateLimit) {
                // and continue and switch to non-allocating mode if we
                // pass the upper threshold
                mode = WriteMode::NO_ALLOCATE;
                DPRINTF(Cache, "Switched to write-no-allocate\n");
            }
        }
    } else {
        // we did not see a write matching the previous one, start
        // over again
        byteCount = write_size;
        mode = WriteMode::ALLOCATE;
        resetDelay(blk_addr);
    }
    nextAddr = write_addr + write_size;
}
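// Rough illustration of the state machine above (the limits here are
// made-up values; the real ones come from the WriteAllocator
// parameters): with coalesceLimit = 128 bytes and noAllocateLimit =
// 192 bytes, a stream of sequential 64-byte writes starts in ALLOCATE,
// switches to COALESCE when the third write pushes byteCount to 192
// (past 128), and to NO_ALLOCATE when the fourth write pushes it past
// 192; any non-sequential write drops back to ALLOCATE and restarts
// byteCount.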
WriteAllocator*
WriteAllocatorParams::create()
{
    return new WriteAllocator(this);
}