/*
 * Copyright (c) 2012-2013, 2018 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */
#include "mem/cache/base.hh"

#include "base/compiler.hh"
#include "base/logging.hh"
#include "debug/Cache.hh"
#include "debug/CachePort.hh"
#include "debug/CacheRepl.hh"
#include "debug/CacheVerbose.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "mem/cache/queue_entry.hh"
#include "params/BaseCache.hh"
#include "params/WriteAllocator.hh"
#include "sim/core.hh"
BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
                                          BaseCache *_cache,
                                          const std::string &_label)
    : QueuedSlavePort(_name, _cache, queue),
      queue(*_cache, *this, true, _label),
      blocked(false), mustSendRetry(false),
      sendRetryEvent([this]{ processSendRetry(); }, _name)
{
}
BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
    : cpuSidePort(p->name + ".cpu_side", this, "CpuSidePort"),
      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
      prefetcher(p->prefetcher),
      writeAllocator(p->write_allocator),
      writebackClean(p->writeback_clean),
      tempBlockWriteback(nullptr),
      writebackTempBlockAtomicEvent([this]{ writebackTempBlockAtomic(); },
                                    name(), false,
                                    EventBase::Delayed_Writeback_Pri),
      lookupLatency(p->tag_latency),
      dataLatency(p->data_latency),
      forwardLatency(p->tag_latency),
      fillLatency(p->data_latency),
      responseLatency(p->response_latency),
      sequentialAccess(p->sequential_access),
      numTarget(p->tgts_per_mshr),
      clusivity(p->clusivity),
      isReadOnly(p->is_read_only),
      noTargetMSHR(nullptr),
      missCount(p->max_miss_count),
      addrRanges(p->addr_ranges.begin(), p->addr_ranges.end())
{
    // the MSHR queue has no reserve entries as we check the MSHR
    // queue on every single allocation, whereas the write queue has
    // as many reserve entries as we have MSHRs, since every MSHR may
    // eventually require a writeback, and we do not check the write
    // buffer before committing to an MSHR

    // forward snoops is overridden in init() once we can query
    // whether the connected master is actually snooping or not

    tempBlock = new TempCacheBlk(blkSize);

    if (prefetcher)
        prefetcher->setCache(this);
}
BaseCache::~BaseCache()
{
    delete tempBlock;
}
void
BaseCache::CacheSlavePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(CachePort, "Port is blocking new requests\n");
    blocked = true;
    // if we already scheduled a retry in this cycle, but it has not yet
    // happened, cancel it
    if (sendRetryEvent.scheduled()) {
        owner.deschedule(sendRetryEvent);
        DPRINTF(CachePort, "Port descheduled retry\n");
        mustSendRetry = true;
    }
}
void
BaseCache::CacheSlavePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(CachePort, "Port is accepting new requests\n");
    blocked = false;
    if (mustSendRetry) {
        // @TODO: need to find a better time (next cycle?)
        owner.schedule(sendRetryEvent, curTick() + 1);
    }
}
void
BaseCache::CacheSlavePort::processSendRetry()
{
    DPRINTF(CachePort, "Port is sending retry\n");

    // reset the flag and call retry
    mustSendRetry = false;
    sendRetry();
}
Addr
BaseCache::regenerateBlkAddr(CacheBlk* blk)
{
    if (blk != tempBlock) {
        return tags->regenerateBlkAddr(blk);
    } else {
        return tempBlock->getAddr();
    }
}
void
BaseCache::init()
{
    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
        fatal("Cache ports on %s are not connected\n", name());
    cpuSidePort.sendRangeChange();
    forwardSnoops = cpuSidePort.isSnooping();
}
Port &
BaseCache::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "cpu_side") {
        return cpuSidePort;
    } else {
        return MemObject::getPort(if_name, idx);
    }
}
bool
BaseCache::inRange(Addr addr) const
{
    for (const auto& r : addrRanges) {
        if (r.contains(addr)) {
            return true;
        }
    }
    return false;
}
void
BaseCache::handleTimingReqHit(PacketPtr pkt, CacheBlk *blk, Tick request_time)
{
    if (pkt->needsResponse()) {
        // These delays should have been consumed by now
        assert(pkt->headerDelay == 0);
        assert(pkt->payloadDelay == 0);

        pkt->makeTimingResponse();

        // In this case we are considering request_time that takes
        // into account the delay of the xbar, if any, and just
        // lat, neglecting responseLatency, modelling hit latency
        // just as the value of lat overridden by access(), which calls
        // the calculateAccessLatency() function.
        cpuSidePort.schedTimingResp(pkt, request_time);
    } else {
        DPRINTF(Cache, "%s satisfied %s, no response needed\n", __func__,
                pkt->print());

        // queue the packet for deletion, as the sending cache is
        // still relying on it; if the block is found in access(),
        // CleanEvict and Writeback messages will be deleted
        // here as well
        pendingDelete.reset(pkt);
    }
}
void
BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                               Tick forward_time, Tick request_time)
{
    if (writeAllocator &&
        pkt && pkt->isWrite() && !pkt->req->isUncacheable()) {
        writeAllocator->updateMode(pkt->getAddr(), pkt->getSize(),
                                   pkt->getBlockAddr(blkSize));
    }

    if (mshr) {
        /// @note writebacks will be checked in getNextMSHR()
        /// for any conflicting requests to the same block

        //@todo remove hw_pf here

        // Coalesce unless it was a software prefetch (see above).
        if (pkt) {
            assert(!pkt->isWriteback());
            // CleanEvicts corresponding to blocks which have
            // outstanding requests in MSHRs are simply sunk here
            if (pkt->cmd == MemCmd::CleanEvict) {
                pendingDelete.reset(pkt);
            } else if (pkt->cmd == MemCmd::WriteClean) {
                // A WriteClean should never coalesce with any
                // outstanding cache maintenance requests.

                // We use forward_time here because there is an
                // uncached memory write, forwarded to WriteBuffer.
                allocateWriteBuffer(pkt, forward_time);
            } else {
                DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                        pkt->print());

                assert(pkt->req->masterId() < system->maxMasters());
                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // We use forward_time here because it is the same
                // considering new targets. We have multiple
                // requests for the same address here. It
                // specifies the latency to allocate an internal
                // buffer and to schedule an event to the queued
                // port and also takes into account the additional
                // delay of the xbar.
                mshr->allocateTarget(pkt, forward_time, order++,
                                     allocOnFill(pkt->cmd));
                if (mshr->getNumTargets() == numTarget) {
                    noTargetMSHR = mshr;
                    setBlocked(Blocked_NoTargets);
                    // need to be careful with this... if this mshr isn't
                    // ready yet (i.e. time > curTick()), we don't want to
                    // move it ahead of mshrs that are ready
                    // mshrQueue.moveToFront(mshr);
                }
            }
        }
    } else {
        // no MSHR
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

        if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
            // We use forward_time here because there is a
            // writeback or writeclean, forwarded to WriteBuffer.
            allocateWriteBuffer(pkt, forward_time);
        } else {
            if (blk && blk->isValid()) {
                // If we have a write miss to a valid block, we
                // need to mark the block non-readable. Otherwise
                // if we allow reads while there's an outstanding
                // write miss, the read could return stale data
                // out of the cache block... a more aggressive
                // system could detect the overlap (if any) and
                // forward data out of the MSHRs, but we don't do
                // that yet. Note that we do need to leave the
                // block valid so that it stays in the cache, in
                // case we get an upgrade response (and hence no
                // new data) when the write miss completes.
                // As long as CPUs do proper store/load forwarding
                // internally, and have a sufficiently weak memory
                // model, this is probably unnecessary, but at some
                // point it must have seemed like we needed it...
                assert((pkt->needsWritable() && !blk->isWritable()) ||
                       pkt->req->isCacheMaintenance());
                blk->status &= ~BlkReadable;
            }
            // Here we are using forward_time, modelling the latency of
            // a miss (outbound) just as forwardLatency, neglecting the
            // lookupLatency component.
            allocateMissBuffer(pkt, forward_time);
        }
    }
}
void
BaseCache::recvTimingReq(PacketPtr pkt)
{
    // anything that is merely forwarded pays for the forward latency and
    // the delay provided by the crossbar
    Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;

    Cycles lat;
    CacheBlk *blk = nullptr;
    bool satisfied = false;

    PacketList writebacks;
    // Note that lat is passed by reference here. The function
    // access() will set the lat value.
    satisfied = access(pkt, blk, lat, writebacks);

    // After the evicted blocks are selected, they must be forwarded
    // to the write buffer to ensure they logically precede anything
    // happening below
    doWritebacks(writebacks, clockEdge(lat + forwardLatency));

    // Here we charge the headerDelay that takes into account the latencies
    // of the bus, if the packet comes from it.
    // The latency charged is just the value set by the access() function.
    // In case of a hit we are neglecting response latency.
    // In case of a miss we are neglecting forward latency.
    Tick request_time = clockEdge(lat);
    // Here we reset the timing of the packet.
    pkt->headerDelay = pkt->payloadDelay = 0;

    if (satisfied) {
        // notify before anything else as later handleTimingReqHit might turn
        // the packet into a response
        if (prefetcher && blk && blk->wasPrefetched()) {
            blk->status &= ~BlkHWPrefetched;
        }

        handleTimingReqHit(pkt, blk, request_time);
    } else {
        handleTimingReqMiss(pkt, blk, forward_time, request_time);
    }

    if (prefetcher) {
        // track time of availability of next prefetch, if any
        Tick next_pf_time = prefetcher->nextPrefetchReadyTime();
        if (next_pf_time != MaxTick) {
            schedMemSideSendEvent(next_pf_time);
        }
    }
}
void
BaseCache::handleUncacheableWriteResp(PacketPtr pkt)
{
    Tick completion_time = clockEdge(responseLatency) +
        pkt->headerDelay + pkt->payloadDelay;

    // Reset the bus additional time as it is now accounted for
    pkt->headerDelay = pkt->payloadDelay = 0;

    cpuSidePort.schedTimingResp(pkt, completion_time);
}
void
BaseCache::recvTimingResp(PacketPtr pkt)
{
    assert(pkt->isResponse());

    // all header delay should be paid for by the crossbar, unless
    // this is a prefetch response from above
    panic_if(pkt->headerDelay != 0 && pkt->cmd != MemCmd::HardPFResp,
             "%s saw a non-zero packet delay\n", name());

    const bool is_error = pkt->isError();

    if (is_error) {
        DPRINTF(Cache, "%s: Cache received %s with error\n", __func__,
                pkt->print());
    }

    DPRINTF(Cache, "%s: Handling response %s\n", __func__,
            pkt->print());

    // if this is a write, we should be looking at an uncacheable
    // write
    if (pkt->isWrite()) {
        assert(pkt->req->isUncacheable());
        handleUncacheableWriteResp(pkt);
        return;
    }

    // we have dealt with any (uncacheable) writes above, from here on
    // we know we are dealing with an MSHR due to a miss or a prefetch
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->popSenderState());
    assert(mshr);

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = nullptr;
    }

    // Initial target is used just for stats
    QueueEntry::Target *initial_tgt = mshr->getTarget();
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick() - initial_tgt->recvTime;

    if (pkt->req->isUncacheable()) {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    } else {
        assert(pkt->req->masterId() < system->maxMasters());
        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
            miss_latency;
    }

    PacketList writebacks;

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
         mshr->wasWholeLineWrite);

    // make sure that if the mshr was due to a whole line write then
    // the response is an invalidation
    assert(!mshr->wasWholeLineWrite || pkt->isInvalidate());

    CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %#llx being updated in Cache\n",
                pkt->getAddr());

        const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
            writeAllocator->allocate() : mshr->allocOnFill();
        blk = handleFill(pkt, blk, writebacks, allocate);
        assert(blk != nullptr);
    }

    if (blk && blk->isValid() && pkt->isClean() && !pkt->isInvalidate()) {
        // The block was marked not readable while there was a pending
        // cache maintenance operation, restore its flag.
        blk->status |= BlkReadable;

        // This was a cache clean operation (without invalidate)
        // and we have a copy of the block already. Since there
        // is no invalidation, we can promote targets that don't
        // require a writable copy
        mshr->promoteReadable();
    }

    if (blk && blk->isWritable() && !pkt->req->isCacheInvalidate()) {
        // If at this point the referenced block is writable and the
        // response is not a cache invalidate, we promote targets that
        // were deferred as we couldn't guarantee a writable copy
        mshr->promoteWritable();
    }

    serviceMSHRTargets(mshr, pkt, blk);

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding.. see comment in timingAccess()
        blk->status &= ~BlkReadable;
        mshrQueue.markPending(mshr);
        schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
    } else {
        // while we deallocate an mshr from the queue we still have to
        // check the isFull condition before and after as we might
        // have been using the reserved entries already
        const bool was_full = mshrQueue.isFull();
        mshrQueue.deallocate(mshr);
        if (was_full && !mshrQueue.isFull()) {
            clearBlocked(Blocked_NoMSHRs);
        }

        // Request the bus for a prefetch if this deallocation freed enough
        // MSHRs for a prefetch to take place
        if (prefetcher && mshrQueue.canPrefetch()) {
            Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                         clockEdge());
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    }

    // if we used temp block, check to see if it's valid and then clear it out
    if (blk == tempBlock && tempBlock->isValid()) {
        evictBlock(blk, writebacks);
    }

    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
    // copy writebacks to write buffer
    doWritebacks(writebacks, forward_time);

    DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
    delete pkt;
}
Tick
BaseCache::recvAtomic(PacketPtr pkt)
{
    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    // We use lookupLatency here because it is used to specify the latency
    // of the tag lookup for a hit
    Cycles lat = lookupLatency;

    CacheBlk *blk = nullptr;
    PacketList writebacks;
    bool satisfied = access(pkt, blk, lat, writebacks);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty
        // block. If a dirty block is encountered a WriteClean
        // will update any copies to the path to the memory
        // until the point of reference.
        DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                __func__, pkt->print(), blk->print());
        PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
        writebacks.push_back(wb_pkt);
    }

    // handle writebacks resulting from the access here to ensure they
    // logically precede anything happening below
    doWritebacksAtomic(writebacks);
    assert(writebacks.empty());

    if (!satisfied) {
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // do any writebacks resulting from the response handling
    doWritebacksAtomic(writebacks);

    // if we used temp block, check to see if it's valid and if so
    // clear it out, but only do so after the call to recvAtomic is
    // finished so that any downstream observers (such as a snoop
    // filter), first see the fill, and only then see the eviction
    if (blk == tempBlock && tempBlock->isValid()) {
        // the atomic CPU calls recvAtomic for fetch and load/store
        // sequentially, and we may already have a tempBlock
        // writeback from the fetch that we have not yet sent
        if (tempBlockWriteback) {
            // if that is the case, write the previous one back, and
            // do not schedule any new event
            writebackTempBlockAtomic();
        } else {
            // the writeback/clean eviction happens after the call to
            // recvAtomic has finished (but before any successive
            // calls), so that the response handling from the fill is
            // allowed to happen first
            schedule(writebackTempBlockAtomicEvent, curTick());
        }

        tempBlockWriteback = evictBlock(blk);
    }

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat * clockPeriod();
}
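// Note on the return value above: recvAtomic() reports latency in ticks.
// With an illustrative (non-default) 500-tick clock period, a lat of 4
// cycles is therefore returned as 4 * 500 = 2000 ticks.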
void
BaseCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
    Addr blk_addr = pkt->getBlockAddr(blkSize);
    bool is_secure = pkt->isSecure();
    CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
    MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found.  As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->trySatisfyFunctional(&cbpw, blk_addr, is_secure, blkSize,
                                     blk->data);

    // data we have is dirty if marked as such or if we have an
    // in-service MSHR that is pending a modified line
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingModified()));

    bool done = have_dirty ||
        cpuSidePort.trySatisfyFunctional(pkt) ||
        mshrQueue.trySatisfyFunctional(pkt) ||
        writeBuffer.trySatisfyFunctional(pkt) ||
        memSidePort.trySatisfyFunctional(pkt);

    DPRINTF(CacheVerbose, "%s: %s %s%s%s\n", __func__, pkt->print(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        // if it came as a request from the CPU side then make sure it
        // continues towards the memory side
        if (from_cpu_side) {
            memSidePort.sendFunctional(pkt);
        } else if (cpuSidePort.isSnooping()) {
            // if it came from the memory side, it must be a snoop request
            // and we should only forward it if we are forwarding snoops
            cpuSidePort.sendFunctionalSnoop(pkt);
        }
    }
}
void
BaseCache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
    assert(pkt->isRequest());

    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = pkt->getOffset(blkSize);
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
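// Illustrative summary of the routine above: the packet always receives the
// old memory value (pkt->setData(blk_data)); for a conditional swap the
// block is only overwritten when the bytes at the target offset compare
// equal to the condition value supplied via req->getExtraData(), while a
// plain SwapReq always overwrites and dirties the block.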
QueueEntry*
BaseCache::getNextQueueEntry()
{
    // Check both MSHR queue and write buffer for potential requests,
    // note that null does not mean there is no request, it could
    // simply be that it is not ready
    MSHR *miss_mshr  = mshrQueue.getNext();
    WriteQueueEntry *wq_entry = writeBuffer.getNext();

    // If we got a write buffer request ready, first priority is a
    // full write buffer, otherwise we favour the miss requests
    if (wq_entry && (writeBuffer.isFull() || !miss_mshr)) {
        // need to search MSHR queue for conflicting earlier miss.
        MSHR *conflict_mshr = mshrQueue.findPending(wq_entry);

        if (conflict_mshr && conflict_mshr->order < wq_entry->order) {
            // Service misses in order until conflict is cleared.
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue write
        return wq_entry;
    } else if (miss_mshr) {
        // need to check for conflicting earlier writeback
        WriteQueueEntry *conflict_mshr = writeBuffer.findPending(miss_mshr);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return wq_entry here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;

            // @todo Note that we ignore the ready time of the conflict here
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !wq_entry);
    if (prefetcher && mshrQueue.canPrefetch()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = pkt->getBlockAddr(blkSize);
            if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
                !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
                !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                assert(pkt->req->masterId() < system->maxMasters());
                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;

                // allocate an MSHR and return it, note
                // that we send the packet straight away, so do not
                // schedule the send
                return allocateMissBuffer(pkt, curTick(), false);
            } else {
                // free the request and packet
                delete pkt;
            }
        }
    }

    return nullptr;
}
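// Summary of the arbitration above: a ready write-buffer entry is only
// chosen ahead of a ready miss when the write buffer is full (or there is
// no ready miss), an older conflicting entry in the other queue is always
// serviced first, and a prefetch is only generated when both queues are
// empty and an MSHR can be spared.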
void
BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
{
    assert(pkt->isRequest());

    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsWritable() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        if (pkt->isAtomicOp()) {
            // extract data from cache and save it into the data field in
            // the packet as a return value from this atomic op
            int offset = tags->extractBlkOffset(pkt->getAddr());
            uint8_t *blk_data = blk->data + offset;
            pkt->setData(blk_data);

            // execute AMO operation
            (*(pkt->getAtomicOp()))(blk_data);

            // set block status to dirty
            blk->status |= BlkDirty;
        } else {
            cmpAndSwap(blk, pkt);
        }
    } else if (pkt->isWrite()) {
        // we have the block in a writable state and can go ahead,
        // note that the line may also be considered writable in
        // downstream caches along the path to memory, but always
        // Exclusive, and never Modified
        assert(blk->isWritable());
        // Write or WriteLine at the first cache with block in writable state
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
        }
        // Always mark the line as dirty (and thus transition to the
        // Modified state) even if we are a failed StoreCond so we
        // supply data to any snoops that have appended themselves to
        // this cache before knowing the store will fail.
        blk->status |= BlkDirty;
        DPRINTF(CacheVerbose, "%s for %s (write)\n", __func__, pkt->print());
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }

        // all read responses have a data payload
        assert(pkt->hasRespData());
        pkt->setDataFromBlock(blk->data, blkSize);
    } else if (pkt->isUpgrade()) {
        // sanity check
        assert(!pkt->hasSharers());

        if (blk->isDirty()) {
            // we were in the Owned state, and a cache above us that
            // has the line in Shared state needs to be made aware
            // that the data it already has is in fact dirty
            pkt->setCacheResponding();
            blk->status &= ~BlkDirty;
        }
    } else if (pkt->isClean()) {
        blk->status &= ~BlkDirty;
    } else {
        assert(pkt->isInvalidate());
        invalidateBlock(blk);
        DPRINTF(CacheVerbose, "%s for %s (invalidation)\n", __func__,
                pkt->print());
    }
}
/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
Cycles
BaseCache::calculateTagOnlyLatency(const uint32_t delay,
                                   const Cycles lookup_lat) const
{
    // A tag-only access has to wait for the packet to arrive in order to
    // perform the tag lookup.
    return ticksToCycles(delay) + lookup_lat;
}
Cycles
BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
                                  const Cycles lookup_lat) const
{
    Cycles lat(0);

    if (blk != nullptr) {
        // As soon as the access arrives, for sequential accesses first access
        // tags, then the data entry. In the case of parallel accesses the
        // latency is dictated by the slowest of tag and data latencies.
        if (sequentialAccess) {
            lat = ticksToCycles(delay) + lookup_lat + dataLatency;
        } else {
            lat = ticksToCycles(delay) + std::max(lookup_lat, dataLatency);
        }

        // Check if the block to be accessed is available. If not, apply the
        // access latency on top of when the block is ready to be accessed.
        const Tick tick = curTick() + delay;
        const Tick when_ready = blk->getWhenReady();
        if (when_ready > tick &&
            ticksToCycles(when_ready - tick) > lat) {
            lat += ticksToCycles(when_ready - tick);
        }
    } else {
        // In case of a miss, we neglect the data access in a parallel
        // configuration (i.e., the data access will be stopped as soon as
        // we find out it is a miss), and use the tag-only latency.
        lat = calculateTagOnlyLatency(delay, lookup_lat);
    }

    return lat;
}
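// Illustrative example (numbers are hypothetical, not defaults): with a
// 2-cycle tag lookup, a 3-cycle data access and a crossbar header delay
// worth 1 cycle, a hit costs 1 + 2 + 3 = 6 cycles when sequentialAccess is
// true, and 1 + max(2, 3) = 4 cycles when tag and data are probed in
// parallel; a miss pays only the tag-only latency, 1 + 2 = 3 cycles, in
// either configuration.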
bool
BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
                  PacketList &writebacks)
{
    // sanity check
    assert(pkt->isRequest());

    chatty_assert(!(isReadOnly && pkt->isWrite()),
                  "Should never see a write in a read-only cache %s\n",
                  name());

    // Access block in the tags
    Cycles tag_latency(0);
    blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), tag_latency);

    DPRINTF(Cache, "%s for %s %s\n", __func__, pkt->print(),
            blk ? "hit " + blk->print() : "miss");

    if (pkt->req->isCacheMaintenance()) {
        // A cache maintenance operation is always forwarded to the
        // memory below even if the block is found in dirty state.

        // We defer any changes to the state of the block until we
        // create and mark as in service the mshr for the downstream
        // packet.

        // Calculate access latency on top of when the packet arrives. This
        // takes into account the bus delay.
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        return false;
    }

    if (pkt->isEviction()) {
        // We check for presence of block in above caches before issuing
        // Writeback or CleanEvict to write buffer. Therefore the only
        // possible cases can be of a CleanEvict packet coming from above
        // encountering a Writeback generated in this cache peer cache and
        // waiting in the write buffer. Cases of upper level peer caches
        // generating CleanEvict and Writeback or simply CleanEvict and
        // CleanEvict almost simultaneously will be caught by snoops sent out
        // by crossbar.
        WriteQueueEntry *wb_entry = writeBuffer.findMatch(pkt->getAddr(),
                                                          pkt->isSecure());
        if (wb_entry) {
            assert(wb_entry->getNumTargets() == 1);
            PacketPtr wbPkt = wb_entry->getTarget()->pkt;
            assert(wbPkt->isWriteback());

            if (pkt->isCleanEviction()) {
                // The CleanEvict and WritebackClean snoops into other
                // peer caches of the same level while traversing the
                // crossbar. If a copy of the block is found, the
                // packet is deleted in the crossbar. Hence, none of
                // the other upper level caches connected to this
                // cache have the block, so we can clear the
                // BLOCK_CACHED flag in the Writeback if set and
                // discard the CleanEvict by returning true.
                wbPkt->clearBlockCached();

                // A clean evict does not need to access the data array
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return true;
            } else {
                assert(pkt->cmd == MemCmd::WritebackDirty);
                // Dirty writeback from above trumps our clean
                // writeback... discard here
                // Note: markInService will remove entry from writeback buffer.
                markInService(wb_entry);
            }
        }
    }

    // Writeback handling is special case.  We can write the block into
    // the cache without having a writeable copy (or any copy at all).
    if (pkt->isWriteback()) {
        assert(blkSize == pkt->getSize());

        // we could get a clean writeback while we are having
        // outstanding accesses to a block, do the simple thing for
        // now and drop the clean writeback so that we do not upset
        // any ordering/decisions about ownership already taken
        if (pkt->cmd == MemCmd::WritebackClean &&
            mshrQueue.findMatch(pkt->getAddr(), pkt->isSecure())) {
            DPRINTF(Cache, "Clean writeback %#llx to block with MSHR, "
                    "dropping\n", pkt->getAddr());

            // A writeback searches for the block, then writes the data.
            // As the writeback is being dropped, the data is not touched,
            // and we just had to wait for the time to find a match in the
            // MSHR. As of now assume a mshr queue search takes as long as
            // a tag lookup for simplicity.
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

            return true;
        }

        if (!blk) {
            // need to do a replacement
            blk = allocateBlock(pkt, writebacks);
            if (!blk) {
                // no replaceable block available: give up, fwd to next level.
                // A writeback searches for the block, then writes the data.
                // As the block could not be found, it was a tag-only access.
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                return false;
            }

            blk->status |= BlkReadable;
        }
        // only mark the block dirty if we got a writeback command,
        // and leave it as is for a clean writeback
        if (pkt->cmd == MemCmd::WritebackDirty) {
            // TODO: the coherent cache can assert(!blk->isDirty());
            blk->status |= BlkDirty;
        }
        // if the packet does not have sharers, it is passing
        // writable, and we got the writeback in Modified or Exclusive
        // state, if not we are in the Owned or Shared state
        if (!pkt->hasSharers()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        // A writeback searches for the block, then writes the data
        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        return true;
    } else if (pkt->cmd == MemCmd::CleanEvict) {
        // A CleanEvict does not need to access the data array
        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

        if (blk) {
            // Found the block in the tags, need to stop CleanEvict from
            // propagating further down the hierarchy. Returning true will
            // treat the CleanEvict like a satisfied write request and delete
            // it.
            return true;
        }
        // We didn't find the block here, propagate the CleanEvict further
        // down the memory hierarchy. Returning false will treat the CleanEvict
        // like a Writeback which could not find a replaceable block so has to
        // go to next level.
        return false;
    } else if (pkt->cmd == MemCmd::WriteClean) {
        // WriteClean handling is a special case. We can allocate a
        // block directly if it doesn't exist and we can update the
        // block immediately. The WriteClean transfers the ownership
        // of the block as well.
        assert(blkSize == pkt->getSize());

        if (!blk) {
            if (pkt->writeThrough()) {
                // A writeback searches for the block, then writes the data.
                // As the block could not be found, it was a tag-only access.
                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);

                // if this is a write through packet, we don't try to
                // allocate if the block is not present
                return false;
            } else {
                // a writeback that misses needs to allocate a new block
                blk = allocateBlock(pkt, writebacks);
                if (!blk) {
                    // no replaceable block available: give up, fwd to
                    // next level.
                    // A writeback searches for the block, then writes the
                    // data. As the block could not be found, it was a
                    // tag-only access.
                    lat = calculateTagOnlyLatency(pkt->headerDelay,
                                                  tag_latency);

                    return false;
                }

                blk->status |= BlkReadable;
            }
        }

        // at this point either this is a writeback or a write-through
        // write clean operation and the block is already in this
        // cache, we need to update the data and the block flags
        // TODO: the coherent cache can assert(!blk->isDirty());
        if (!pkt->writeThrough()) {
            blk->status |= BlkDirty;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        pkt->writeDataToBlock(blk->data, blkSize);
        DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());

        // A writeback searches for the block, then writes the data
        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

        // When the packet metadata arrives, the tag lookup will be done while
        // the payload is arriving. Then the block will be ready to access as
        // soon as the fill is done
        blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
            std::max(cyclesToTicks(tag_latency), (uint64_t)pkt->payloadDelay));

        // if this is a write-through packet it will be sent to cache
        // below
        return !pkt->writeThrough();
    } else if (blk && (pkt->needsWritable() ? blk->isWritable() :
                       blk->isReadable())) {
        // OK to satisfy access

        // Calculate access latency based on the need to access the data array
        if (pkt->isRead() || pkt->isWrite()) {
            lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
        } else {
            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
        }

        satisfyRequest(pkt, blk);
        maintainClusivity(pkt->fromCache(), blk);

        return true;
    }

    // Can't satisfy access normally... either no block (blk == nullptr)
    // or have block but need writable

    lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);

    if (!blk && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}
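// The boolean returned by access() means "satisfied in this cache": true
// lets recvTimingReq()/recvAtomic() respond (or sink the packet) locally,
// while false sends the request down the miss path via an MSHR or the
// write buffer.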
void
BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
{
    if (from_cache && blk && blk->isValid() && !blk->isDirty() &&
        clusivity == Enums::mostly_excl) {
        // if we have responded to a cache, and our block is still
        // valid, but not dirty, and this cache is mostly exclusive
        // with respect to the cache above, drop the block
        invalidateBlock(blk);
    }
}
CacheBlk*
BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
                      bool allocate)
{
    assert(pkt->isResponse());
    Addr addr = pkt->getAddr();
    bool is_secure = pkt->isSecure();

    CacheBlk::State old_state = blk ? blk->status : 0;

    // When handling a fill, we should have no writes to this line.
    assert(addr == pkt->getBlockAddr(blkSize));
    assert(!writeBuffer.findMatch(addr, is_secure));

    if (!blk) {
        // better have read new data...
        assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);

        // need to do a replacement if allocating, otherwise we stick
        // with the temporary storage
        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;

        if (!blk) {
            // No replaceable block or a mostly exclusive
            // cache... just use temporary storage to complete the
            // current request and then get rid of it
            blk = tempBlock;
            tempBlock->insert(addr, is_secure);
            DPRINTF(Cache, "using temp block for %#llx (%s)\n", addr,
                    is_secure ? "s" : "ns");
        }
    } else {
        // existing block... probably an upgrade
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    // Block is guaranteed to be valid at this point
    assert(blk->isValid());
    assert(blk->isSecure() == is_secure);
    assert(regenerateBlkAddr(blk) == addr);

    blk->status |= BlkReadable;

    // sanity check for whole-line writes, which should always be
    // marked as writable as part of the fill, and then later marked
    // dirty as part of satisfyRequest
    if (pkt->cmd == MemCmd::InvalidateResp) {
        assert(!pkt->hasSharers());
    }

    // here we deal with setting the appropriate state of the line,
    // and we start by looking at the hasSharers flag, and ignore the
    // cacheResponding flag (normally signalling dirty data) if the
    // packet has sharers, thus the line is never allocated as Owned
    // (dirty but not writable), and always ends up being either
    // Shared, Exclusive or Modified, see Packet::setCacheResponding
    // for more details
    if (!pkt->hasSharers()) {
        // we could get a writable line from memory (rather than a
        // cache) even in a read-only cache, note that we set this bit
        // even for a read-only cache, possibly revisit this decision
        blk->status |= BlkWritable;

        // check if we got this via cache-to-cache transfer (i.e., from a
        // cache that had the block in Modified or Owned state)
        if (pkt->cacheResponding()) {
            // we got the block in Modified state, and invalidated the
            // owners copy
            blk->status |= BlkDirty;

            chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                          "in read-only cache %s\n", name());
        }
    }

    DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
            addr, is_secure ? "s" : "ns", old_state, blk->print());

    // if we got new data, copy it in (checking for a read response
    // and a response that has data is the same in the end)
    if (pkt->isRead()) {
        // sanity checks
        assert(pkt->hasData());
        assert(pkt->getSize() == blkSize);

        pkt->writeDataToBlock(blk->data, blkSize);
    }
    // The block will be ready when the payload arrives and the fill is done
    blk->setWhenReady(clockEdge(fillLatency) + pkt->headerDelay +
                      pkt->payloadDelay);

    return blk;
}
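// In summary (as the comments in handleFill() describe): hasSharers set ->
// the line is filled as Shared (readable only); hasSharers clear and no
// cache responding -> Exclusive (writable, clean); hasSharers clear and a
// cache responding with dirty data -> Modified (writable, dirty). Owned is
// never allocated on a fill.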
CacheBlk*
BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
{
    const Addr addr = pkt->getAddr();
    const bool is_secure = pkt->isSecure();

    // Find replacement victim
    std::vector<CacheBlk*> evict_blks;
    CacheBlk *victim = tags->findVictim(addr, is_secure, evict_blks);

    // It is valid to return nullptr if there is no victim
    if (!victim)
        return nullptr;

    // Print victim block's information
    DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());

    // Check for transient state allocations. If any of the entries listed
    // for eviction has a transient state, the allocation fails
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            Addr repl_addr = regenerateBlkAddr(blk);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
            if (repl_mshr) {
                // must be an outstanding upgrade or clean request
                // on a block we're about to replace...
                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
                       repl_mshr->isCleaning());

                // too hard to replace block with transient state
                // allocation failed, block not inserted
                return nullptr;
            }
        }
    }

    // The victim will be replaced by a new entry, so increase the replacement
    // counter if a valid block is being replaced
    if (victim->isValid()) {
        DPRINTF(Cache, "replacement: replacing %#llx (%s) with %#llx "
                "(%s): %s\n", regenerateBlkAddr(victim),
                victim->isSecure() ? "s" : "ns",
                addr, is_secure ? "s" : "ns",
                victim->isDirty() ? "writeback" : "clean");
    }

    // Evict valid blocks associated to this victim block
    for (const auto& blk : evict_blks) {
        if (blk->isValid()) {
            if (blk->wasPrefetched()) {
                // the prefetched block was never used
            }

            evictBlock(blk, writebacks);
        }
    }

    // Insert new block at victimized entry
    tags->insertBlock(pkt, victim);

    return victim;
}
void
BaseCache::invalidateBlock(CacheBlk *blk)
{
    // If handling a block present in the Tags, let it do its invalidation
    // process, which will update stats and invalidate the block itself
    if (blk != tempBlock) {
        tags->invalidate(blk);
    } else {
        tempBlock->invalidate();
    }
}
void
BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
{
    PacketPtr pkt = evictBlock(blk);
    if (pkt) {
        writebacks.push_back(pkt);
    }
}
PacketPtr
BaseCache::writebackBlk(CacheBlk *blk)
{
    chatty_assert(!isReadOnly || writebackClean,
                  "Writeback from read-only cache");
    assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));

    writebacks[Request::wbMasterId]++;

    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure())
        req->setFlags(Request::SECURE);

    req->taskId(blk->task_id);

    PacketPtr pkt =
        new Packet(req, blk->isDirty() ?
                   MemCmd::WritebackDirty : MemCmd::WritebackClean);

    DPRINTF(Cache, "Create Writeback %s writable: %d, dirty: %d\n",
            pkt->print(), blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}
PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
    RequestPtr req = std::make_shared<Request>(
        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);

    if (blk->isSecure()) {
        req->setFlags(Request::SECURE);
    }
    req->taskId(blk->task_id);

    PacketPtr pkt = new Packet(req, MemCmd::WriteClean, blkSize, id);

    if (dest) {
        req->setFlags(dest);
        pkt->setWriteThrough();
    }

    DPRINTF(Cache, "Create %s writable: %d, dirty: %d\n", pkt->print(),
            blk->isWritable(), blk->isDirty());

    if (blk->isWritable()) {
        // not asserting shared means we pass the block in modified
        // state, mark our own block non-writeable
        blk->status &= ~BlkWritable;
    } else {
        // we are in the Owned state, tell the receiver
        pkt->setHasSharers();
    }

    // make sure the block is not marked dirty
    blk->status &= ~BlkDirty;

    pkt->setDataFromBlock(blk->data, blkSize);

    return pkt;
}
void
BaseCache::memWriteback()
{
    tags->forEachBlk([this](CacheBlk &blk) { writebackVisitor(blk); });
}

void
BaseCache::memInvalidate()
{
    tags->forEachBlk([this](CacheBlk &blk) { invalidateVisitor(blk); });
}

bool
BaseCache::isDirty() const
{
    return tags->anyBlk([](CacheBlk &blk) { return blk.isDirty(); });
}

bool
BaseCache::coalesce() const
{
    return writeAllocator && writeAllocator->coalesce();
}
void
BaseCache::writebackVisitor(CacheBlk &blk)
{
    if (blk.isDirty()) {
        assert(blk.isValid());

        RequestPtr request = std::make_shared<Request>(
            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);

        request->taskId(blk.task_id);
        if (blk.isSecure()) {
            request->setFlags(Request::SECURE);
        }

        Packet packet(request, MemCmd::WriteReq);
        packet.dataStatic(blk.data);

        memSidePort.sendFunctional(&packet);

        blk.status &= ~BlkDirty;
    }
}
void
BaseCache::invalidateVisitor(CacheBlk &blk)
{
    if (blk.isDirty())
        warn_once("Invalidating dirty cache lines. " \
                  "Expect things to break.\n");

    if (blk.isValid()) {
        assert(!blk.isDirty());
        invalidateBlock(&blk);
    }
}
Tick
BaseCache::nextQueueReadyTime() const
{
    Tick nextReady = std::min(mshrQueue.nextReadyTime(),
                              writeBuffer.nextReadyTime());

    // Don't signal prefetch ready time if no MSHRs available
    // Will signal once enough MSHRs are deallocated
    if (prefetcher && mshrQueue.canPrefetch()) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}
bool
BaseCache::sendMSHRQueuePacket(MSHR* mshr)
{
    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;

    DPRINTF(Cache, "%s: MSHR %s\n", __func__, tgt_pkt->print());

    // if the cache is in write coalescing mode or (additionally) in
    // no allocation mode, and we have a write packet with an MSHR
    // that is not a whole-line write (due to incompatible flags etc),
    // then reset the write mode
    if (writeAllocator && writeAllocator->coalesce() && tgt_pkt->isWrite()) {
        if (!mshr->isWholeLineWrite()) {
            // if we are currently write coalescing, hold on the
            // MSHR as many cycles extra as we need to completely
            // write a cache line
            if (writeAllocator->delay(mshr->blkAddr)) {
                Tick delay = blkSize / tgt_pkt->getSize() * clockPeriod();
                DPRINTF(CacheVerbose, "Delaying pkt %s %llu ticks to allow "
                        "for write coalescing\n", tgt_pkt->print(), delay);
                mshrQueue.delay(mshr, delay);
                return false;
            } else {
                writeAllocator->reset();
            }
        } else {
            writeAllocator->resetDelay(mshr->blkAddr);
        }
    }

    CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);

    // either a prefetch that is not present upstream, or a normal
    // MSHR request, proceed to get the packet to send downstream
    PacketPtr pkt = createMissPacket(tgt_pkt, blk, mshr->needsWritable(),
                                     mshr->isWholeLineWrite());

    mshr->isForward = (pkt == nullptr);

    if (mshr->isForward) {
        // not a cache block request, but a response is expected
        // make copy of current packet to forward, keep current
        // copy for response handling
        pkt = new Packet(tgt_pkt, false, true);
        assert(!pkt->isWrite());
    }

    // play it safe and append (rather than set) the sender state,
    // as forwarded packets may already have existing state
    pkt->pushSenderState(mshr);

    if (pkt->isClean() && blk && blk->isDirty()) {
        // A cache clean operation is looking for a dirty block. Mark
        // the packet so that the destination xbar can determine that
        // there will be a follow-up write packet as well.
        pkt->setSatisfied();
    }

    if (!memSidePort.sendTimingReq(pkt)) {
        // we are awaiting a retry, but we
        // delete the packet and will be creating a new packet
        // when we get the opportunity
        delete pkt;

        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        // As part of the call to sendTimingReq the packet is
        // forwarded to all neighbouring caches (and any caches
        // above them) as a snoop. Thus at this point we know if
        // any of the neighbouring caches are responding, and if
        // so, we know it is dirty, and we can determine if it is
        // being passed as Modified, making our MSHR the ordering
        // point
        bool pending_modified_resp = !pkt->hasSharers() &&
            pkt->cacheResponding();
        markInService(mshr, pending_modified_resp);

        if (pkt->isClean() && blk && blk->isDirty()) {
            // A cache clean operation is looking for a dirty
            // block. If a dirty block is encountered a WriteClean
            // will update any copies to the path to the memory
            // until the point of reference.
            DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                    __func__, pkt->print(), blk->print());
            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                             pkt->id);
            PacketList writebacks;
            writebacks.push_back(wb_pkt);
            doWritebacks(writebacks, 0);
        }

        return false;
    }
}
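// Worked example for the write-coalescing delay in sendMSHRQueuePacket()
// above (numbers are illustrative, not defaults): with a 64-byte block,
// 8-byte writes and a 500-tick clock period, each delayed MSHR is held for
// 64 / 8 * 500 = 4000 ticks, i.e. the eight cycles the write stream would
// need to cover the whole line.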
bool
BaseCache::sendWriteQueuePacket(WriteQueueEntry* wq_entry)
{
    // always a single target for write queue entries
    PacketPtr tgt_pkt = wq_entry->getTarget()->pkt;

    DPRINTF(Cache, "%s: write %s\n", __func__, tgt_pkt->print());

    // forward as is, both for evictions and uncacheable writes
    if (!memSidePort.sendTimingReq(tgt_pkt)) {
        // note that we have now masked any requestBus and
        // schedSendEvent (we will wait for a retry before
        // doing anything), and this is so even if we do not
        // care about this packet and might override it before
        // it gets retried
        return true;
    } else {
        markInService(wq_entry);
        return false;
    }
}
void
BaseCache::serialize(CheckpointOut &cp) const
{
    bool dirty(isDirty());

    if (dirty) {
        warn("*** The cache still contains dirty data. ***\n");
        warn("    Make sure to drain the system using the correct flags.\n");
        warn("    This checkpoint will not restore correctly " \
             "and dirty data in the cache will be lost!\n");
    }

    // Since we don't checkpoint the data in the cache, any dirty data
    // will be lost when restoring from a checkpoint of a system that
    // wasn't drained properly. Flag the checkpoint as invalid if the
    // cache contains dirty data.
    bool bad_checkpoint(dirty);
    SERIALIZE_SCALAR(bad_checkpoint);
}
void
BaseCache::unserialize(CheckpointIn &cp)
{
    bool bad_checkpoint;
    UNSERIALIZE_SCALAR(bad_checkpoint);
    if (bad_checkpoint) {
        fatal("Restoring from checkpoints with dirty caches is not "
              "supported in the classic memory system. Please remove any "
              "caches or drain them properly before taking checkpoints.\n");
    }
}
void
BaseCache::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

    // These macros make it easier to sum the right subset of commands and
    // to change the subset of commands that are considered "demand" vs
    // "non-demand"
#define SUM_DEMAND(s) \
    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])

    // should writebacks be included here?  prior code was inconsistent...
#define SUM_NON_DEMAND(s) \
    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
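    // For example, demand_hits is therefore the sum of ReadReq, WriteReq,
    // WriteLineReq, ReadExReq, ReadCleanReq and ReadSharedReq hits, while
    // prefetch commands (SoftPFReq, HardPFReq, SoftPFExReq) only show up in
    // the overall_* statistics.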
1730 .name(name() + ".demand_hits")
1731 .desc("number of demand (read+write) hits")
1732 .flags(total
| nozero
| nonan
)
1734 demandHits
= SUM_DEMAND(hits
);
1735 for (int i
= 0; i
< system
->maxMasters(); i
++) {
1736 demandHits
.subname(i
, system
->getMasterName(i
));
1740 .name(name() + ".overall_hits")
1741 .desc("number of overall hits")
1742 .flags(total
| nozero
| nonan
)
1744 overallHits
= demandHits
+ SUM_NON_DEMAND(hits
);
1745 for (int i
= 0; i
< system
->maxMasters(); i
++) {
1746 overallHits
.subname(i
, system
->getMasterName(i
));
1750 for (int access_idx
= 0; access_idx
< MemCmd::NUM_MEM_CMDS
; ++access_idx
) {
1751 MemCmd
cmd(access_idx
);
1752 const string
&cstr
= cmd
.toString();
1755 .init(system
->maxMasters())
1756 .name(name() + "." + cstr
+ "_misses")
1757 .desc("number of " + cstr
+ " misses")
1758 .flags(total
| nozero
| nonan
)
1760 for (int i
= 0; i
< system
->maxMasters(); i
++) {
1761 misses
[access_idx
].subname(i
, system
->getMasterName(i
));
1766 .name(name() + ".demand_misses")
1767 .desc("number of demand (read+write) misses")
1768 .flags(total
| nozero
| nonan
)
1770 demandMisses
= SUM_DEMAND(misses
);
1771 for (int i
= 0; i
< system
->maxMasters(); i
++) {
1772 demandMisses
.subname(i
, system
->getMasterName(i
));
1776 .name(name() + ".overall_misses")
1777 .desc("number of overall misses")
1778 .flags(total
| nozero
| nonan
)
1780 overallMisses
= demandMisses
+ SUM_NON_DEMAND(misses
);
1781 for (int i
= 0; i
< system
->maxMasters(); i
++) {
1782 overallMisses
.subname(i
, system
->getMasterName(i
));
1785 // Miss latency statistics
1786 for (int access_idx
= 0; access_idx
< MemCmd::NUM_MEM_CMDS
; ++access_idx
) {
1787 MemCmd
cmd(access_idx
);
1788 const string
&cstr
= cmd
.toString();
1790 missLatency
[access_idx
]
1791 .init(system
->maxMasters())
1792 .name(name() + "." + cstr
+ "_miss_latency")
1793 .desc("number of " + cstr
+ " miss cycles")
1794 .flags(total
| nozero
| nonan
)
1796 for (int i
= 0; i
< system
->maxMasters(); i
++) {
1797 missLatency
[access_idx
].subname(i
, system
->getMasterName(i
));
1802 .name(name() + ".demand_miss_latency")
1803 .desc("number of demand (read+write) miss cycles")
1804 .flags(total
| nozero
| nonan
)
1806 demandMissLatency
= SUM_DEMAND(missLatency
);
1807 for (int i
= 0; i
< system
->maxMasters(); i
++) {
1808 demandMissLatency
.subname(i
, system
->getMasterName(i
));
1812 .name(name() + ".overall_miss_latency")
1813 .desc("number of overall miss cycles")
1814 .flags(total
| nozero
| nonan
)
1816 overallMissLatency
= demandMissLatency
+ SUM_NON_DEMAND(missLatency
);
1817 for (int i
= 0; i
< system
->maxMasters(); i
++) {
1818 overallMissLatency
.subname(i
, system
->getMasterName(i
));

    // access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses(hits+misses)")
            .flags(total | nozero | nonan)
            ;
        accesses[access_idx] = hits[access_idx] + misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            accesses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total | nozero | nonan)
        ;
    demandAccesses = demandHits + demandMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAccesses.subname(i, system->getMasterName(i));
    }

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total | nozero | nonan)
        ;
    overallAccesses = overallHits + overallMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAccesses.subname(i, system->getMasterName(i));
    }

    // miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        missRate[access_idx] = misses[access_idx] / accesses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            missRate[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total | nozero | nonan)
        ;
    demandMissRate = demandMisses / demandAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMissRate.subname(i, system->getMasterName(i));
    }

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total | nozero | nonan)
        ;
    overallMissRate = overallMisses / overallAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMissRate.subname(i, system->getMasterName(i));
    }

    // miss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMissLatency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total | nozero | nonan)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAvgMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMissLatency.subname(i, system->getMasterName(i));
    }

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;
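    // Note (explanatory): this is an element-wise formula over the blocking
    // causes, so e.g. avg_blocked[no_mshrs] is blocked_cycles[no_mshrs]
    // divided by blocked_causes[no_mshrs], i.e. the mean stall per
    // MSHR-exhaustion event.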

    unusedPrefetches
        .name(name() + ".unused_prefetches")
        .desc("number of HardPF blocks evicted w/o reference")
        .flags(nozero)
        ;

    writebacks
        .init(system->maxMasters())
        .name(name() + ".writebacks")
        .desc("number of writebacks")
        .flags(total | nozero | nonan)
        ;
    for (int i = 0; i < system->maxMasters(); i++) {
        writebacks.subname(i, system->getMasterName(i));
    }

    // MSHR hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_hits[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_hits")
            .desc("number of " + cstr + " MSHR hits")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_hits[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrHits
        .name(name() + ".demand_mshr_hits")
        .desc("number of demand (read+write) MSHR hits")
        .flags(total | nozero | nonan)
        ;
    demandMshrHits = SUM_DEMAND(mshr_hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrHits.subname(i, system->getMasterName(i));
    }

    overallMshrHits
        .name(name() + ".overall_mshr_hits")
        .desc("number of overall MSHR hits")
        .flags(total | nozero | nonan)
        ;
    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrHits.subname(i, system->getMasterName(i));
    }

    // MSHR miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_misses[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_misses")
            .desc("number of " + cstr + " MSHR misses")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_misses[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMisses
        .name(name() + ".demand_mshr_misses")
        .desc("number of demand (read+write) MSHR misses")
        .flags(total | nozero | nonan)
        ;
    demandMshrMisses = SUM_DEMAND(mshr_misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMisses.subname(i, system->getMasterName(i));
    }

    overallMshrMisses
        .name(name() + ".overall_mshr_misses")
        .desc("number of overall MSHR misses")
        .flags(total | nozero | nonan)
        ;
    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMisses.subname(i, system->getMasterName(i));
    }

    // MSHR miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_miss_latency[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_miss_latency")
            .desc("number of " + cstr + " MSHR miss cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMissLatency
        .name(name() + ".demand_mshr_miss_latency")
        .desc("number of demand (read+write) MSHR miss cycles")
        .flags(total | nozero | nonan)
        ;
    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallMshrMissLatency
        .name(name() + ".overall_mshr_miss_latency")
        .desc("number of overall MSHR miss cycles")
        .flags(total | nozero | nonan)
        ;
    overallMshrMissLatency =
        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_uncacheable")
            .desc("number of " + cstr + " MSHR uncacheable")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
        }
    }

    overallMshrUncacheable
        .name(name() + ".overall_mshr_uncacheable_misses")
        .desc("number of overall MSHR uncacheable misses")
        .flags(total | nozero | nonan)
        ;
    overallMshrUncacheable =
        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrUncacheable.subname(i, system->getMasterName(i));
    }

    // MSHR uncacheable latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable_lat[access_idx]
            .init(system->maxMasters())
            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
            .desc("number of " + cstr + " MSHR uncacheable cycles")
            .flags(total | nozero | nonan)
            ;
        for (int i = 0; i < system->maxMasters(); i++) {
            mshr_uncacheable_lat[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    overallMshrUncacheableLatency
        .name(name() + ".overall_mshr_uncacheable_latency")
        .desc("number of overall MSHR uncacheable cycles")
        .flags(total | nozero | nonan)
        ;
    overallMshrUncacheableLatency =
        SUM_DEMAND(mshr_uncacheable_lat) +
        SUM_NON_DEMAND(mshr_uncacheable_lat);
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

    // MSHR access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrAccesses[access_idx]
            .name(name() + "." + cstr + "_mshr_accesses")
            .desc("number of " + cstr + " mshr accesses(hits+misses)")
            .flags(total | nozero | nonan)
            ;
        mshrAccesses[access_idx] =
            mshr_hits[access_idx] + mshr_misses[access_idx]
            + mshr_uncacheable[access_idx];
    }

    demandMshrAccesses
        .name(name() + ".demand_mshr_accesses")
        .desc("number of demand (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrAccesses = demandMshrHits + demandMshrMisses;

    overallMshrAccesses
        .name(name() + ".overall_mshr_accesses")
        .desc("number of overall (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrAccesses = overallMshrHits + overallMshrMisses
        + overallMshrUncacheable;

    // MSHR miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrMissRate[access_idx]
            .name(name() + "." + cstr + "_mshr_miss_rate")
            .desc("mshr miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;
        mshrMissRate[access_idx] =
            mshr_misses[access_idx] / accesses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            mshrMissRate[access_idx].subname(i, system->getMasterName(i));
        }
    }

    demandMshrMissRate
        .name(name() + ".demand_mshr_miss_rate")
        .desc("mshr miss rate for demand accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrMissRate = demandMshrMisses / demandAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandMshrMissRate.subname(i, system->getMasterName(i));
    }

    overallMshrMissRate
        .name(name() + ".overall_mshr_miss_rate")
        .desc("mshr miss rate for overall accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrMissRate = overallMshrMisses / overallAccesses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallMshrMissRate.subname(i, system->getMasterName(i));
    }

    // mshrMiss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
            .desc("average " + cstr + " mshr miss latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrMissLatency[access_idx] =
            mshr_miss_latency[access_idx] / mshr_misses[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMshrMissLatency[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    demandAvgMshrMissLatency
        .name(name() + ".demand_avg_mshr_miss_latency")
        .desc("average demand mshr miss latency")
        .flags(total | nozero | nonan)
        ;
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    overallAvgMshrMissLatency
        .name(name() + ".overall_avg_mshr_miss_latency")
        .desc("average overall mshr miss latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
    }

    // mshrUncacheable latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrUncacheableLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
            .desc("average " + cstr + " mshr uncacheable latency")
            .flags(total | nozero | nonan)
            ;
        avgMshrUncacheableLatency[access_idx] =
            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];

        for (int i = 0; i < system->maxMasters(); i++) {
            avgMshrUncacheableLatency[access_idx].subname(
                i, system->getMasterName(i));
        }
    }

    overallAvgMshrUncacheableLatency
        .name(name() + ".overall_avg_mshr_uncacheable_latency")
        .desc("average overall mshr uncacheable latency")
        .flags(total | nozero | nonan)
        ;
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;
    for (int i = 0; i < system->maxMasters(); i++) {
        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
    }

    replacements
        .name(name() + ".replacements")
        .desc("number of replacements")
        ;
}

void
BaseCache::regProbePoints()
{
    ppHit = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Hit");
    ppMiss = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Miss");
    ppFill = new ProbePointArg<PacketPtr>(this->getProbeManager(), "Fill");
}
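
// Illustrative sketch (not part of the original file): another component can
// observe these probe points by registering a listener against this cache's
// probe manager, much like the prefetcher attaches to "Miss". Assuming a
// helper derived from ProbeListenerArgBase<PacketPtr>, it would look roughly
// like:
//
//     struct MissListener : public ProbeListenerArgBase<PacketPtr>
//     {
//         using ProbeListenerArgBase<PacketPtr>::ProbeListenerArgBase;
//         // called every time the cache notifies the "Miss" probe point
//         void notify(const PacketPtr &pkt) override
//         { DPRINTF(Cache, "observed miss for addr %#x\n", pkt->getAddr()); }
//     };
//
// constructed with this cache's ProbeManager and the point name "Miss".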

bool
BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    assert(pkt->isResponse());

    // Express snoop responses from master to slave, e.g., from L1 to L2
    cache->recvTimingSnoopResp(pkt);
    return true;
}

bool
BaseCache::CpuSidePort::tryTiming(PacketPtr pkt)
{
    if (cache->system->bypassCaches() || pkt->isExpressSnoop()) {
        // always let express snoop packets through even if blocked
        return true;
    } else if (blocked || mustSendRetry) {
        // either already committed to send a retry, or blocked
        mustSendRetry = true;
        return false;
    }
    mustSendRetry = false;
    return true;
}

bool
BaseCache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
    assert(pkt->isRequest());

    if (cache->system->bypassCaches()) {
        // Just forward the packet if caches are disabled.
        // @todo This should really enqueue the packet rather
        bool M5_VAR_USED success = cache->memSidePort.sendTimingReq(pkt);
        assert(success);
        return true;
    } else if (tryTiming(pkt)) {
        cache->recvTimingReq(pkt);
        return true;
    }
    return false;
}

Tick
BaseCache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // Forward the request if the system is in cache bypass mode.
        return cache->memSidePort.sendAtomic(pkt);
    }

    return cache->recvAtomic(pkt);
}

void
BaseCache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (cache->system->bypassCaches()) {
        // The cache should be flushed if we are in cache bypass mode,
        // so we don't need to check if we need to update anything.
        cache->memSidePort.sendFunctional(pkt);
        return;
    }

    // functional request
    cache->functionalAccess(pkt, true);
}

AddrRangeList
BaseCache::CpuSidePort::getAddrRanges() const
{
    return cache->getAddrRanges();
}

BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                         const std::string &_label)
    : CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}

bool
BaseCache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
    cache->recvTimingResp(pkt);
    return true;
}

// Express snooping requests to memside port
void
BaseCache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // handle snooping requests
    cache->recvTimingSnoopReq(pkt);
}

Tick
BaseCache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    return cache->recvAtomicSnoop(pkt);
}

void
BaseCache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
    // Snoops shouldn't happen when bypassing caches
    assert(!cache->system->bypassCaches());

    // functional snoop (note that in contrast to atomic we don't have
    // a specific functionalSnoop method, as they have the same
    // behaviour regardless)
    cache->functionalAccess(pkt, false);
}

void
BaseCache::CacheReqPacketQueue::sendDeferredPacket()
{
    assert(!waitingOnRetry);

    // there should never be any deferred request packets in the
    // queue, instead we rely on the cache to provide the packets
    // from the MSHR queue or write queue
    assert(deferredPacketReadyTime() == MaxTick);

    // check for request packets (requests & writebacks)
    QueueEntry* entry = cache.getNextQueueEntry();

    if (!entry) {
        // can happen if e.g. we attempt a writeback and fail, but
        // before the retry, the writeback is eliminated because
        // we snoop another cache's ReadEx.
    } else {
        // let our snoop responses go first if there are responses to
        // the same addresses
        if (checkConflictingSnoop(entry->getTarget()->pkt)) {
            return;
        }
        waitingOnRetry = entry->sendPacket(cache);
    }

    // if we succeeded and are not waiting for a retry, schedule the
    // next send considering when the next queue is ready, note that
    // snoop responses have their own packet queue and thus schedule
    // their own events
    if (!waitingOnRetry) {
        schedSendEvent(cache.nextQueueReadyTime());
    }
}

BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                    BaseCache *_cache,
                                    const std::string &_label)
    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
      _reqQueue(*_cache, *this, _snoopRespQueue, _label),
      _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
{
}

void
WriteAllocator::updateMode(Addr write_addr, unsigned write_size,
                           Addr blk_addr)
{
    // check if we are continuing where the last write ended
    if (nextAddr == write_addr) {
        delayCtr[blk_addr] = delayThreshold;
        // stop if we have already saturated
        if (mode != WriteMode::NO_ALLOCATE) {
            byteCount += write_size;
            // switch to streaming mode if we have passed the lower
            // threshold
            if (mode == WriteMode::ALLOCATE &&
                byteCount > coalesceLimit) {
                mode = WriteMode::COALESCE;
                DPRINTF(Cache, "Switched to write coalescing\n");
            } else if (mode == WriteMode::COALESCE &&
                       byteCount > noAllocateLimit) {
                // and continue and switch to non-allocating mode if we
                // pass the upper threshold
                mode = WriteMode::NO_ALLOCATE;
                DPRINTF(Cache, "Switched to write-no-allocate\n");
            }
        }
    } else {
        // we did not see a write matching the previous one, start
        // over again
        byteCount = write_size;
        mode = WriteMode::ALLOCATE;
        resetDelay(blk_addr);
    }
    nextAddr = write_addr + write_size;
}

WriteAllocator*
WriteAllocatorParams::create()
{
    return new WriteAllocator(this);
}