/*
 * Copyright (c) 2012-2013, 2015-2019 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */
/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include "mem/cache/mshr.hh"

#include <algorithm>
#include <cassert>
#include <functional>
#include <memory>
#include <sstream>
#include <string>
#include <utility>

#include "base/logging.hh"
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/base.hh"
#include "mem/request.hh"
#include "sim/core.hh"

MSHR::MSHR() : downstreamPending(false),
               pendingModified(false),
               postInvalidate(false), postDowngrade(false),
               wasWholeLineWrite(false), isForward(false)
{
}

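// Note that MSHR's default constructor above only clears the sticky
// status flags; the per-miss state (block address, target lists,
// readiness) is established in allocate() when the MSHR is assigned to
// an outstanding miss.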
MSHR::TargetList::TargetList()
    : needsWritable(false), hasUpgrade(false), allocOnFill(false),
      hasFromCache(false)
{}

void
MSHR::TargetList::updateFlags(PacketPtr pkt, Target::Source source,
                              bool alloc_on_fill)
{
    if (source != Target::FromSnoop) {
        if (pkt->needsWritable()) {
            needsWritable = true;
        }

        // StoreCondReq is effectively an upgrade if it's in an MSHR
        // since it would have been failed already if we didn't have a
        // writable copy
        if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
            hasUpgrade = true;
        }

        // potentially re-evaluate whether we should allocate on a fill or
        // not
        allocOnFill = allocOnFill || alloc_on_fill;

        if (source != Target::FromPrefetcher) {
            hasFromCache = hasFromCache || pkt->fromCache();

            updateWriteFlags(pkt);
        }
    }
}

void
MSHR::TargetList::populateFlags()
{
    resetFlags();
    for (auto& t: *this) {
        updateFlags(t.pkt, t.source, t.allocOnFill);
    }
}

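// Record the bytes written by a target in writesBitmap so that a run of
// compatible writes covering every byte of the block can later be
// recognised as a whole-line write via isWholeLineWrite().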
void
MSHR::TargetList::updateWriteFlags(PacketPtr pkt)
{
    if (isWholeLineWrite()) {
        // if we have already seen writes for the full block
        // stop here, this might be a full line write followed
        // by other compatible requests (e.g., reads)
        return;
    }

    if (canMergeWrites) {
        if (!pkt->isWrite()) {
            // We won't allow further merging if this hasn't
            // been a write
            canMergeWrites = false;
            return;
        }

        // Avoid merging requests with special flags (e.g.,
        // strictly ordered)
        const Request::FlagsType no_merge_flags =
            Request::UNCACHEABLE | Request::STRICT_ORDER |
            Request::MMAPPED_IPR | Request::PRIVILEGED |
            Request::LLSC | Request::MEM_SWAP |
            Request::MEM_SWAP_COND | Request::SECURE;
        const auto &req_flags = pkt->req->getFlags();
        bool compat_write = !req_flags.isSet(no_merge_flags);

        // if this is the first write, it might be a whole
        // line write and even if we can't merge any
        // subsequent write requests, we still need to service
        // it as a whole line write (e.g., SECURE whole line
        // write)
        bool first_write = empty();
        if (first_write || compat_write) {
            auto offset = pkt->getOffset(blkSize);
            auto begin = writesBitmap.begin() + offset;
            std::fill(begin, begin + pkt->getSize(), true);
        }

        // We won't allow further merging if this has been a
        // special request
        canMergeWrites &= compat_write;
    }
}

void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, Target::Source source, bool markPending,
                      bool alloc_on_fill)
{
    updateFlags(pkt, source, alloc_on_fill);
    if (markPending) {
        // Iterate over the SenderState stack and see if we find
        // an MSHR entry. If we do, set the downstreamPending
        // flag. Otherwise, do nothing.
        MSHR *mshr = pkt->findNextSenderState<MSHR>();
        if (mshr != nullptr) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        } else {
            // No need to clear downstreamPending later
            markPending = false;
        }
    }

    emplace_back(pkt, readyTime, order, source, markPending, alloc_on_fill);
}

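// Rewrite an upgrade-style command in place: an UpgradeReq becomes a
// full ReadExReq, while SC upgrades and store conditionals are turned
// into their failed variants, since an intervening invalidation means
// the requester no longer holds a valid copy to upgrade.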
static void
replaceUpgrade(PacketPtr pkt)
{
    // remember if the current packet has data allocated
    bool has_data = pkt->hasData() || pkt->hasRespData();

    if (pkt->cmd == MemCmd::UpgradeReq) {
        pkt->cmd = MemCmd::ReadExReq;
        DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
    } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
        pkt->cmd = MemCmd::SCUpgradeFailReq;
        DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
    } else if (pkt->cmd == MemCmd::StoreCondReq) {
        pkt->cmd = MemCmd::StoreCondFailReq;
        DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
    }

    if (!has_data) {
        // there is no sensible way of setting the data field if the
        // new command actually would carry data
        assert(!pkt->hasData());

        if (pkt->hasRespData()) {
            // we went from a packet that had no data (neither request,
            // nor response), to one that does, and therefore we need to
            // actually allocate space for the data payload
            pkt->allocate();
        }
    }
}

void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    for (auto& t : *this) {
        replaceUpgrade(t.pkt);
    }

    hasUpgrade = false;
}

void
MSHR::TargetList::clearDownstreamPending(MSHR::TargetList::iterator begin,
                                         MSHR::TargetList::iterator end)
{
    for (auto t = begin; t != end; t++) {
        if (t->markedPending) {
            // Iterate over the SenderState stack and see if we find
            // an MSHR entry. If we find one, clear the
            // downstreamPending flag by calling
            // clearDownstreamPending(). This recursively clears the
            // downstreamPending flag in all caches this packet has
            // passed through.
            MSHR *mshr = t->pkt->findNextSenderState<MSHR>();
            if (mshr != nullptr) {
                mshr->clearDownstreamPending();
            }
            t->markedPending = false;
        }
    }
}

void
MSHR::TargetList::clearDownstreamPending()
{
    clearDownstreamPending(begin(), end());
}

bool
MSHR::TargetList::trySatisfyFunctional(PacketPtr pkt)
{
    for (auto& t : *this) {
        if (pkt->trySatisfyFunctional(t.pkt)) {
            return true;
        }
    }

    return false;
}

void
MSHR::TargetList::print(std::ostream &os, int verbosity,
                        const std::string &prefix) const
{
    for (auto& t : *this) {
        const char *s;
        switch (t.source) {
          case Target::FromCPU:
            s = "FromCPU";
            break;
          case Target::FromSnoop:
            s = "FromSnoop";
            break;
          case Target::FromPrefetcher:
            s = "FromPrefetcher";
            break;
          default:
            s = "";
            break;
        }
        ccprintf(os, "%s%s: ", prefix, s);
        t.pkt->print(os, verbosity, "");
    }
}

void
MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
               Tick when_ready, Counter _order, bool alloc_on_fill)
{
    blkAddr = blk_addr;
    blkSize = blk_size;
    isSecure = target->isSecure();
    readyTime = when_ready;
    order = _order;
    assert(target);
    isForward = false;
    wasWholeLineWrite = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;

    targets.init(blkAddr, blkSize);
    deferredTargets.init(blkAddr, blkSize);

    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    targets.add(target, when_ready, _order, source, true, alloc_on_fill);

    // All targets must refer to the same block
    assert(target->matchBlockAddr(targets.front().pkt, blkSize));
}

void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets.clearDownstreamPending();
}

void
MSHR::markInService(bool pending_modified_resp)
{
    assert(!inService);

    inService = true;
    pendingModified = targets.needsWritable || pending_modified_resp;
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets.clearDownstreamPending();
    }
    // if the line is not considered a whole-line write when sent
    // downstream, make sure it is also not considered a whole-line
    // write when receiving the response, and vice versa
    wasWholeLineWrite = isWholeLineWrite();
}

void
MSHR::deallocate()
{
    assert(targets.empty());
    targets.resetFlags();
    assert(deferredTargets.isReset());
    inService = false;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
                     bool alloc_on_fill)
{
    // assume we'd never issue a prefetch when we've got an
    // outstanding miss
    assert(pkt->cmd != MemCmd::HardPFReq);

    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - the MSHR's first (and only) non-deferred target is a cache
    //   maintenance packet
    // - the new target is a cache maintenance packet (this is probably
    //   overly conservative but certainly safe)
    // - this target requires a writable block and either we're not
    //   getting a writable block back or we have already snooped
    //   another read request that will downgrade our writable block
    //   to non-writable (Shared or Owned)
    PacketPtr tgt_pkt = targets.front().pkt;
    if (pkt->req->isCacheMaintenance() ||
        tgt_pkt->req->isCacheMaintenance() ||
        !deferredTargets.empty() ||
        (inService &&
         (hasPostInvalidate() ||
          (pkt->needsWritable() &&
           (!isPendingModified() || hasPostDowngrade() || isForward))))) {
        // need to put on deferred list
        if (inService && hasPostInvalidate())
            replaceUpgrade(pkt);
        deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true,
                            alloc_on_fill);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list. Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets.add(pkt, whenReady, _order, Target::FromCPU, !inService,
                    alloc_on_fill);
    }
}

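// Handle a snoop that hits this MSHR. Returns false when the snoop
// logically precedes the outstanding request (the cache then handles it
// in the normal way); returns true when the request precedes the snoop,
// in which case the snoop may be deferred as a FromSnoop target and
// postInvalidate/postDowngrade updated.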
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    DPRINTF(Cache, "%s for %s\n", __func__, pkt->print());

    // when we snoop packets the needsWritable and isInvalidate flags
    // should always be the same, however, this assumes that we never
    // snoop writes as they are currently not marked as invalidations
    panic_if((pkt->needsWritable() != pkt->isInvalidate()) &&
             !pkt->req->isCacheMaintenance(),
             "%s got snoop %s where needsWritable, "
             "does not match isInvalidate", name(), pkt->print());

    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop. Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache. The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
            targets.replaceUpgrades();
            deferredTargets.replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsWritable() || pkt->req->isCacheInvalidate()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets.replaceUpgrades();
    }

    PacketPtr tgt_pkt = targets.front().pkt;
    if (hasPostInvalidate() || tgt_pkt->req->isCacheInvalidate()) {
        // a prior snoop has already appended an invalidation or a
        // cache invalidation operation is in progress, so logically
        // we don't have the block anymore; no need for further
        // snooping
        return true;
    }

    // Start by determining if we will eventually respond or not,
    // matching the conditions checked in Cache::handleSnoop
    const bool will_respond = isPendingModified() && pkt->needsResponse() &&
        !pkt->isClean();
    if (isPendingModified() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting a writable copy (Modified or Exclusive),
        //    so this MSHR is the ordering point, and we need to respond
        //    after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // The packet we are snooping may be deleted by the time we
        // actually process the target, and we consequently need to
        // save a copy here. Clear flags and also allocate new data as
        // the original packet data storage may have been deleted by
        // the time we get to process this packet. In the cases where
        // we are not responding after handling the snoop we also need
        // to create a copy of the request to be on the safe side. In
        // the latter case the cache is responsible for deleting both
        // the packet and the request as part of handling the deferred
        // snoop.
        PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
            new Packet(std::make_shared<Request>(*pkt->req), pkt->cmd,
                       blkSize, pkt->id);

        if (will_respond) {
            // we are the ordering point, and will consequently
            // respond, and depending on whether the packet
            // needsWritable or not we either pass a Shared line or a
            // Modified line
            pkt->setCacheResponding();

            // inform the cache hierarchy that this cache had the line
            // in the Modified state, even if the response is passed
            // as Shared (and thus non-writable)
            pkt->setResponderHadWritable();

            // in the case of an uncacheable request there is no need
            // to set the responderHadWritable flag, but since the
            // recipient does not care there is no harm in doing so
        } else if (isPendingModified() && pkt->isClean()) {
            // this cache doesn't respond to the clean request, a
            // destination xbar will respond to this request, but to
            // do so it needs to know if it should wait for the
            // WriteCleanReq
            pkt->setSatisfied();
        }

        targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                    downstreamPending && targets.needsWritable, false);

        if (pkt->needsWritable() || pkt->isInvalidate()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }
    }

    if (!pkt->needsWritable() && !pkt->req->isUncacheable()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had a writable one
        postDowngrade = true;
        // make sure that any downstream cache does not respond with a
        // writable (and dirty) copy even if it has one, unless it was
        // explicitly asked for one
        pkt->setHasSharers();
    }

    return true;
}

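// Split off the targets that the incoming response can service; any
// FromCPU targets left in the target list afterwards are re-requested
// so they get a fresh copy of the block.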
MSHR::TargetList
MSHR::extractServiceableTargets(PacketPtr pkt)
{
    TargetList ready_targets;
    ready_targets.init(blkAddr, blkSize);
    // If the downstream MSHR got an invalidation request then we only
    // service the first of the FromCPU targets and any other
    // non-FromCPU target. This way the remaining FromCPU targets
    // issue a new request and get a fresh copy of the block and we
    // avoid memory consistency violations.
    if (pkt->cmd == MemCmd::ReadRespWithInvalidate) {
        auto it = targets.begin();
        assert((it->source == Target::FromCPU) ||
               (it->source == Target::FromPrefetcher));
        ready_targets.push_back(*it);
        it = targets.erase(it);
        while (it != targets.end()) {
            if (it->source == Target::FromCPU) {
                it++;
            } else {
                assert(it->source == Target::FromSnoop);
                ready_targets.push_back(*it);
                it = targets.erase(it);
            }
        }
        ready_targets.populateFlags();
    } else {
        std::swap(ready_targets, targets);
    }
    targets.populateFlags();

    return ready_targets;
}

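// Try to move deferred targets onto the regular target list once the
// current response has been handled. Returns false only when there is
// nothing at all left to service; a cache maintenance request bounds the
// set of deferred targets that may be promoted together.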
bool
MSHR::promoteDeferredTargets()
{
    if (targets.empty() && deferredTargets.empty()) {
        // nothing to promote
        return false;
    }

    // the deferred targets can be generally promoted unless they
    // contain a cache maintenance request

    // find the first target that is a cache maintenance request
    auto it = std::find_if(deferredTargets.begin(), deferredTargets.end(),
                           [](MSHR::Target &t) {
                               return t.pkt->req->isCacheMaintenance();
                           });
    if (it == deferredTargets.begin()) {
        // if the first deferred target is a cache maintenance packet
        // then we can promote provided the targets list is empty and
        // we can service it on its own
        if (targets.empty()) {
            targets.splice(targets.end(), deferredTargets, it);
        }
    } else {
        // if a cache maintenance operation exists, we promote all the
        // deferred targets that precede it, or all deferred targets
        // otherwise
        targets.splice(targets.end(), deferredTargets,
                       deferredTargets.begin(), it);
    }

    deferredTargets.populateFlags();
    targets.populateFlags();
    order = targets.front().order;
    readyTime = std::max(curTick(), targets.front().readyTime);

    return true;
}

void
MSHR::promoteIf(const std::function<bool (Target &)>& pred)
{
    // if any of the deferred targets were upper-level cache
    // requests marked downstreamPending, need to clear that
    assert(!downstreamPending);  // not pending here anymore

    // find the first target that does not satisfy the condition
    auto last_it = std::find_if_not(deferredTargets.begin(),
                                    deferredTargets.end(),
                                    pred);
    // for the prefix of the deferredTargets [begin(), last_it) clear
    // the downstreamPending flag and move them to the target list
    deferredTargets.clearDownstreamPending(deferredTargets.begin(),
                                           last_it);
    targets.splice(targets.end(), deferredTargets,
                   deferredTargets.begin(), last_it);
    // We need to update the flags for the target lists after the
    // modifications
    deferredTargets.populateFlags();
    targets.populateFlags();
}

void
MSHR::promoteReadable()
{
    if (!deferredTargets.empty() && !hasPostInvalidate()) {
        // We got a non-invalidating response, and we have the block
        // but we have deferred targets which are waiting and they do
        // not need writable. This can happen if the original request
        // was for a cache clean operation and we had a copy of the
        // block. Since we serviced the cache clean operation and we
        // have the block, there's no need to defer the targets, so
        // move them up to the regular target list.

        auto pred = [](Target &t) {
            assert(t.source == Target::FromCPU);
            return !t.pkt->req->isCacheInvalidate() &&
                   !t.pkt->needsWritable();
        };
        promoteIf(pred);
    }
}

void
MSHR::promoteWritable()
{
    PacketPtr def_tgt_pkt = deferredTargets.front().pkt;
    if (deferredTargets.needsWritable &&
        !(hasPostInvalidate() || hasPostDowngrade()) &&
        !def_tgt_pkt->req->isCacheInvalidate()) {
        // We got a writable response, but we have deferred targets
        // which are waiting to request a writable copy (not because
        // of a pending invalidate). This can happen if the original
        // request was for a read-only block, but we got a writable
        // response anyway. Since we got the writable copy there's no
        // need to defer the targets, so move them up to the regular
        // target list.
        assert(!targets.needsWritable);
        targets.needsWritable = true;

        auto pred = [](Target &t) {
            assert(t.source == Target::FromCPU);
            return !t.pkt->req->isCacheInvalidate();
        };

        promoteIf(pred);
    }
}

bool
MSHR::trySatisfyFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a whole as single entity.
    // For other requests, we iterate over the individual targets
    // since that's where the actual data lies.
    if (pkt->isPrint()) {
        pkt->trySatisfyFunctional(this, blkAddr, isSecure, blkSize, nullptr);
        return false;
    } else {
        return (targets.trySatisfyFunctional(pkt) ||
                deferredTargets.trySatisfyFunctional(pkt));
    }
}

bool
MSHR::sendPacket(BaseCache &cache)
{
    return cache.sendMSHRQueuePacket(this);
}

void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%#llx:%#llx](%s) %s %s %s state: %s %s %s %s %s %s\n",
             prefix, blkAddr, blkAddr + blkSize - 1,
             isSecure ? "s" : "ns",
             isForward ? "Forward" : "",
             allocOnFill() ? "AllocOnFill" : "",
             needsWritable() ? "Wrtbl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             postInvalidate ? "PostInv" : "",
             postDowngrade ? "PostDowngr" : "",
             hasFromCache() ? "HasFromCache" : "");

    if (!targets.empty()) {
        ccprintf(os, "%s  Targets:\n", prefix);
        targets.print(os, verbosity, prefix + "    ");
    }
    if (!deferredTargets.empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets.print(os, verbosity, prefix + "      ");
    }
}

std::string
MSHR::print() const
{
    std::ostringstream str;
    print(str);
    return str.str();
}

bool
MSHR::matchBlockAddr(const Addr addr, const bool is_secure) const
{
    assert(hasTargets());
    return (blkAddr == addr) && (isSecure == is_secure);
}

bool
MSHR::matchBlockAddr(const PacketPtr pkt) const
{
    assert(hasTargets());
    return pkt->matchBlockAddr(blkAddr, isSecure, blkSize);
}

bool
MSHR::conflictAddr(const QueueEntry* entry) const
{
    assert(hasTargets());
    return entry->matchBlockAddr(blkAddr, isSecure);
}