2 * Copyright (c) 2012-2013 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * Authors: Erik Hallnor
47 * Miss Status and Handling Register (MSHR) definitions.
55 #include "base/misc.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "mem/cache/cache.hh"
59 #include "mem/cache/mshr.hh"
60 #include "sim/core.hh"
// Default constructor: initialise every member to an inactive state --
// zero ready time, cacheable, nothing pending downstream, no pending
// dirty/invalidate/downgrade state, no owning queue, no address/size,
// no data.  NOTE(review): the constructor body braces are not visible in
// this extract; fragments below are split across physical lines.
64 MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
65 pendingDirty(false), postInvalidate(false), postDowngrade(false),
66 _isObsolete(false), queue(NULL
), order(0), addr(0), size(0),
67 isSecure(false), inService(false), isForward(false),
68 threadNum(InvalidThreadID
), data(NULL
)
// A fresh target list starts with both summary flags cleared: no target
// needs an exclusive copy and no upgrade request has been buffered.
73 MSHR::TargetList::TargetList()
74 : needsExclusive(false), hasUpgrade(false)
// Append a new target (pkt) to this list and update the list's summary
// flags.  For non-snoop (CPU-side) sources: a packet that needs an
// exclusive copy sets needsExclusive, and upgrade-style commands
// (UpgradeReq / StoreCondReq) are detected below -- the hasUpgrade
// assignment itself is on a line not visible in this extract.
// NOTE(review): the return type line and several guard lines (including
// the apparent markPending / null-mshr checks) are missing from view.
79 MSHR::TargetList::add(PacketPtr pkt
, Tick readyTime
,
80 Counter order
, Target::Source source
, bool markPending
)
82 if (source
!= Target::FromSnoop
) {
83 if (pkt
->needsExclusive()) {
84 needsExclusive
= true;
87 // StoreCondReq is effectively an upgrade if it's in an MSHR
88 // since it would have been failed already if we didn't have a
90 if (pkt
->isUpgrade() || pkt
->cmd
== MemCmd::StoreCondReq
) {
96 // Iterate over the SenderState stack and see if we find
97 // an MSHR entry. If we do, set the downstreamPending
98 // flag. Otherwise, do nothing.
99 MSHR
*mshr
= pkt
->findNextSenderState
<MSHR
>();
101 assert(!mshr
->downstreamPending
);
102 mshr
->downstreamPending
= true;
// Finally record the target itself with its ready time, order stamp,
// source, and pending mark.
106 push_back(Target(pkt
, readyTime
, order
, source
, markPending
));
// Convert an upgrade-style command into the form that is still valid
// once our copy of the block has been invalidated by a snoop:
//   UpgradeReq    -> ReadExReq          (must fetch the whole block)
//   SCUpgradeReq  -> SCUpgradeFailReq   (store-conditional must fail)
//   StoreCondReq  -> StoreCondFailReq
// NOTE(review): the function's return-type line is not visible here.
111 replaceUpgrade(PacketPtr pkt
)
113 if (pkt
->cmd
== MemCmd::UpgradeReq
) {
114 pkt
->cmd
= MemCmd::ReadExReq
;
115 DPRINTF(Cache
, "Replacing UpgradeReq with ReadExReq\n");
116 } else if (pkt
->cmd
== MemCmd::SCUpgradeReq
) {
117 pkt
->cmd
= MemCmd::SCUpgradeFailReq
;
118 DPRINTF(Cache
, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
119 } else if (pkt
->cmd
== MemCmd::StoreCondReq
) {
120 pkt
->cmd
= MemCmd::StoreCondFailReq
;
121 DPRINTF(Cache
, "Replacing StoreCondReq with StoreCondFailReq\n");
// Apply replaceUpgrade() to the packet of every buffered target.
// NOTE(review): a guard on hasUpgrade and the flag reset presumably
// surround this loop in the full source -- those lines are not visible.
127 MSHR::TargetList::replaceUpgrades()
132 Iterator end_i
= end();
133 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
134 replaceUpgrade(i
->pkt
);
// For every target that was added with markPending set, find the
// upstream MSHR recorded in the packet's SenderState stack and clear its
// downstreamPending flag (which recurses further up the hierarchy).
142 MSHR::TargetList::clearDownstreamPending()
144 Iterator end_i
= end();
145 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
146 if (i
->markedPending
) {
147 // Iterate over the SenderState stack and see if we find
148 // an MSHR entry. If we find one, clear the
149 // downstreamPending flag by calling
150 // clearDownstreamPending(). This recursively clears the
151 // downstreamPending flag in all caches this packet has
153 MSHR
*mshr
= i
->pkt
->findNextSenderState
<MSHR
>();
155 mshr
->clearDownstreamPending();
// Functional-access check: probe each buffered target's packet; the
// Packet::checkFunctional call resolves the access against that target's
// data if the addresses overlap.  NOTE(review): the return statements
// (true on a hit, false after the loop) are not visible in this extract.
163 MSHR::TargetList::checkFunctional(PacketPtr pkt
)
165 Iterator end_i
= end();
166 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
167 if (pkt
->checkFunctional(i
->pkt
)) {
// Dump every target to 'os', labelling each with its source
// (FromCPU / FromSnoop / FromPrefetcher) before delegating to the
// packet's own print().  NOTE(review): the switch header, the FromCPU /
// FromSnoop string assignments, and the default case are not visible.
178 print(std::ostream
&os
, int verbosity
, const std::string
&prefix
) const
180 ConstIterator end_i
= end();
181 for (ConstIterator i
= begin(); i
!= end_i
; ++i
) {
184 case Target::FromCPU
:
187 case Target::FromSnoop
:
190 case Target::FromPrefetcher
:
191 s
= "FromPrefetcher";
197 ccprintf(os
, "%s%s: ", prefix
, s
);
198 i
->pkt
->print(os
, verbosity
, "");
// (Re)initialise this MSHR for a new miss: record the security bit and
// ready time, latch uncacheability from the triggering request, clear
// downstreamPending, and seed the (asserted-empty) target list with the
// initial target.  Hardware prefetches (HardPFReq) are tagged
// FromPrefetcher, everything else FromCPU -- snoops never allocate a new
// MSHR.  NOTE(review): the assignments of _addr/_size/order and the
// inService/isForward resets from the full source are not visible here.
204 MSHR::allocate(Addr _addr
, int _size
, PacketPtr target
, Tick whenReady
,
209 isSecure
= target
->isSecure();
210 readyTime
= whenReady
;
214 _isUncacheable
= target
->req
->isUncacheable();
216 downstreamPending
= false;
219 assert(targets
.isReset());
220 // Don't know of a case where we would allocate a new MSHR for a
221 // snoop (mem-side request), so set source according to request here
222 Target::Source source
= (target
->cmd
== MemCmd::HardPFReq
) ?
223 Target::FromPrefetcher
: Target::FromCPU
;
224 targets
.add(target
, whenReady
, _order
, source
, true);
225 assert(deferredTargets
.isReset());
// Clear this MSHR's downstreamPending flag (must currently be set) and
// propagate the clear recursively to any upstream MSHRs referenced by
// our buffered targets.
231 MSHR::clearDownstreamPending()
233 assert(downstreamPending
);
234 downstreamPending
= false;
235 // recursively clear flag on any MSHRs we will be forwarding
237 targets
.clearDownstreamPending();
// Mark this MSHR in service once its request packet has been sent
// downstream.  A forward-no-response MSHR carries exactly one target and
// is retired immediately.  pendingDirty records whether we expect to be
// responsible for dirty data on the response: either some target needs
// an exclusive copy, or the request was mem-inhibited without sharing
// being asserted.  If nothing is still pending below us, tell upstream
// caches a response is on its way.
241 MSHR::markInService(PacketPtr pkt
)
244 if (isForwardNoResponse()) {
245 // we just forwarded the request packet & don't expect a
246 // response, so get rid of it
247 assert(getNumTargets() == 1);
254 pendingDirty
= (targets
.needsExclusive
||
255 (!pkt
->sharedAsserted() && pkt
->memInhibitAsserted()));
256 postInvalidate
= postDowngrade
= false;
258 if (!downstreamPending
) {
259 // let upstream caches know that the request has made it to a
260 // level where it's going to get a response
261 targets
.clearDownstreamPending();
// NOTE(review): the original lines between 262 and 269 are missing from
// this extract; the asserts/reset below appear to belong to a separate
// deallocation routine whose header is not visible -- confirm against
// the full source.
270 assert(targets
.empty());
271 targets
.resetFlags();
272 assert(deferredTargets
.isReset());
277 * Adds a target to an MSHR
// Append a CPU-side target to an already-allocated MSHR, deferring it
// when it cannot safely be serviced by the response to the request
// already in flight (see the original comment block below for the exact
// conditions).  NOTE(review): the `if (inService && ...` opener for the
// deferral condition and the replaceUpgrade call under
// hasPostInvalidate() are on lines not visible in this extract.
280 MSHR::allocateTarget(PacketPtr pkt
, Tick whenReady
, Counter _order
)
282 // if there's a request already in service for this MSHR, we will
283 // have to defer the new target until after the response if any of
284 // the following are true:
285 // - there are other targets already deferred
286 // - there's a pending invalidate to be applied after the response
287 // comes back (but before this target is processed)
288 // - this target requires an exclusive block and either we're not
289 // getting an exclusive block back or we have already snooped
290 // another read request that will downgrade our exclusive block
293 // assume we'd never issue a prefetch when we've got an
295 assert(pkt
->cmd
!= MemCmd::HardPFReq
);
298 (!deferredTargets
.empty() || hasPostInvalidate() ||
299 (pkt
->needsExclusive() &&
300 (!isPendingDirty() || hasPostDowngrade() || isForward
)))) {
301 // need to put on deferred list
302 if (hasPostInvalidate())
304 deferredTargets
.add(pkt
, whenReady
, _order
, Target::FromCPU
, true);
306 // No request outstanding, or still OK to append to
307 // outstanding request: append to regular target list. Only
308 // mark pending if current request hasn't been issued yet
309 // (isn't in service).
310 targets
.add(pkt
, whenReady
, _order
, Target::FromCPU
, !inService
);
// React to a snoop that hits this MSHR's block.  If the request has not
// yet been issued (or is buffered unissued downstream), the snoop
// logically precedes us: just convert any buffered upgrades to full
// reads when the snoop invalidates.  Otherwise our request precedes the
// snoop; if we will own dirty data or the snoop invalidates, save a copy
// of the snoop packet as a FromSnoop target so it can be replayed after
// the response arrives, and record post-invalidate / post-downgrade
// state.  NOTE(review): the function's return statements and some guard
// lines are not visible in this extract.
315 MSHR::handleSnoop(PacketPtr pkt
, Counter _order
)
317 DPRINTF(Cache
, "%s for %s address %x size %d\n", __func__
,
318 pkt
->cmdString(), pkt
->getAddr(), pkt
->getSize());
319 if (!inService
|| (pkt
->isExpressSnoop() && downstreamPending
)) {
320 // Request has not been issued yet, or it's been issued
321 // locally but is buffered unissued at some downstream cache
322 // which is forwarding us this snoop. Either way, the packet
323 // we're snooping logically precedes this MSHR's request, so
324 // the snoop has no impact on the MSHR, but must be processed
325 // in the standard way by the cache. The only exception is
326 // that if we're an L2+ cache buffering an UpgradeReq from a
327 // higher-level cache, and the snoop is invalidating, then our
328 // buffered upgrades must be converted to read exclusives,
329 // since the upper-level cache no longer has a valid copy.
330 // That is, even though the upper-level cache got out on its
331 // local bus first, some other invalidating transaction
332 // reached the global bus before the upgrade did.
333 if (pkt
->needsExclusive()) {
334 targets
.replaceUpgrades();
335 deferredTargets
.replaceUpgrades();
341 // From here on down, the request issued by this MSHR logically
342 // precedes the request we're snooping.
343 if (pkt
->needsExclusive()) {
344 // snooped request still precedes the re-request we'll have to
345 // issue for deferred targets, if any...
346 deferredTargets
.replaceUpgrades();
349 if (hasPostInvalidate()) {
350 // a prior snoop has already appended an invalidation, so
351 // logically we don't have the block anymore; no need for
356 if (isPendingDirty() || pkt
->isInvalidate()) {
357 // We need to save and replay the packet in two cases:
358 // 1. We're awaiting an exclusive copy, so ownership is pending,
359 // and we need to respond after we receive data.
360 // 2. It's an invalidation (e.g., UpgradeReq), and we need
361 // to forward the snoop up the hierarchy after the current
362 // transaction completes.
364 // Actual target device (typ. a memory) will delete the
365 // packet on reception, so we need to save a copy here.
366 PacketPtr cp_pkt
= new Packet(pkt
, true);
367 targets
.add(cp_pkt
, curTick(), _order
, Target::FromSnoop
,
368 downstreamPending
&& targets
.needsExclusive
);
370 if (isPendingDirty()) {
371 pkt
->assertMemInhibit();
372 pkt
->setSupplyExclusive();
375 if (pkt
->needsExclusive()) {
376 // This transaction will take away our pending copy
377 postInvalidate
= true;
381 if (!pkt
->needsExclusive()) {
382 // This transaction will get a read-shared copy, downgrading
383 // our copy if we had an exclusive one
384 postDowngrade
= true;
// Promote deferred targets to the active list once the current (now
// empty) target list has been drained: swap the two lists, reset the
// (post-swap) deferred list's flags, and recompute this MSHR's order
// stamp and ready time from the new front target.  NOTE(review): the
// return statements (this appears to report success/failure) are not
// visible in this extract.
393 MSHR::promoteDeferredTargets()
395 assert(targets
.empty());
396 if (deferredTargets
.empty()) {
400 // swap targets & deferredTargets lists
401 std::swap(targets
, deferredTargets
);
403 // clear deferredTargets flags
404 deferredTargets
.resetFlags();
406 order
= targets
.front().order
;
407 readyTime
= std::max(curTick(), targets
.front().readyTime
);
// On a fill response: if we received an exclusive copy (shared not
// asserted, no pending invalidate/downgrade) while deferred targets are
// waiting for exclusivity, the deferral is unnecessary -- mark the
// active list exclusive, clear any upstream downstreamPending marks on
// the deferred targets, and splice them onto the active list.
414 MSHR::handleFill(Packet
*pkt
, CacheBlk
*blk
)
416 if (!pkt
->sharedAsserted()
417 && !(hasPostInvalidate() || hasPostDowngrade())
418 && deferredTargets
.needsExclusive
) {
419 // We got an exclusive response, but we have deferred targets
420 // which are waiting to request an exclusive copy (not because
421 // of a pending invalidate). This can happen if the original
422 // request was for a read-only (non-exclusive) block, but we
423 // got an exclusive copy anyway because of the E part of the
424 // MOESI/MESI protocol. Since we got the exclusive copy
425 // there's no need to defer the targets, so move them up to
426 // the regular target list.
427 assert(!targets
.needsExclusive
);
428 targets
.needsExclusive
= true;
429 // if any of the deferred targets were upper-level cache
430 // requests marked downstreamPending, need to clear that
431 assert(!downstreamPending
); // not pending here anymore
432 deferredTargets
.clearDownstreamPending();
433 // this clears out deferredTargets too
434 targets
.splice(targets
.end(), deferredTargets
);
435 deferredTargets
.resetFlags();
// Functional access against this MSHR: print requests see the MSHR as a
// single entity; data accesses are resolved against the individual
// target packets (active list first, then deferred).
441 MSHR::checkFunctional(PacketPtr pkt
)
443 // For printing, we treat the MSHR as a whole as single entity.
444 // For other requests, we iterate over the individual targets
445 // since that's where the actual data lies.
446 if (pkt
->isPrint()) {
447 pkt
->checkFunctional(this, addr
, isSecure
, size
, NULL
);
450 return (targets
.checkFunctional(pkt
) ||
451 deferredTargets
.checkFunctional(pkt
));
// Pretty-print this MSHR's state line (address range, security,
// forwarding/exclusive/uncacheable/in-service/pending flags) followed by
// its active targets and, when present, its deferred targets.
457 MSHR::print(std::ostream
&os
, int verbosity
, const std::string
&prefix
) const
459 ccprintf(os
, "%s[%x:%x](%s) %s %s %s state: %s %s %s %s %s\n",
460 prefix
, addr
, addr
+size
-1,
461 isSecure
? "s" : "ns",
462 isForward
? "Forward" : "",
463 isForwardNoResponse() ? "ForwNoResp" : "",
464 needsExclusive() ? "Excl" : "",
465 _isUncacheable
? "Unc" : "",
466 inService
? "InSvc" : "",
467 downstreamPending
? "DwnPend" : "",
468 hasPostInvalidate() ? "PostInv" : "",
469 hasPostDowngrade() ? "PostDowngr" : "");
471 ccprintf(os
, "%s Targets:\n", prefix
);
472 targets
.print(os
, verbosity
, prefix
+ " ");
473 if (!deferredTargets
.empty()) {
474 ccprintf(os
, "%s Deferred Targets:\n", prefix
);
475 deferredTargets
.print(os
, verbosity
, prefix
+ " ");