/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/types.hh"
#include "debug/Cache.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/mshr.hh"
#include "sim/core.hh"
64 MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
65 pendingDirty(false), postInvalidate(false),
66 postDowngrade(false), queue(NULL
), order(0), addr(0), size(0),
67 inService(false), isForward(false), threadNum(InvalidThreadID
),
73 MSHR::TargetList::TargetList()
74 : needsExclusive(false), hasUpgrade(false)
79 MSHR::TargetList::add(PacketPtr pkt
, Tick readyTime
,
80 Counter order
, Target::Source source
, bool markPending
)
82 if (source
!= Target::FromSnoop
) {
83 if (pkt
->needsExclusive()) {
84 needsExclusive
= true;
87 // StoreCondReq is effectively an upgrade if it's in an MSHR
88 // since it would have been failed already if we didn't have a
90 if (pkt
->isUpgrade() || pkt
->cmd
== MemCmd::StoreCondReq
) {
96 // Iterate over the SenderState stack and see if we find
97 // an MSHR entry. If we do, set the downstreamPending
98 // flag. Otherwise, do nothing.
99 MSHR
*mshr
= pkt
->findNextSenderState
<MSHR
>();
101 assert(!mshr
->downstreamPending
);
102 mshr
->downstreamPending
= true;
106 push_back(Target(pkt
, readyTime
, order
, source
, markPending
));
111 replaceUpgrade(PacketPtr pkt
)
113 if (pkt
->cmd
== MemCmd::UpgradeReq
) {
114 pkt
->cmd
= MemCmd::ReadExReq
;
115 DPRINTF(Cache
, "Replacing UpgradeReq with ReadExReq\n");
116 } else if (pkt
->cmd
== MemCmd::SCUpgradeReq
) {
117 pkt
->cmd
= MemCmd::SCUpgradeFailReq
;
118 DPRINTF(Cache
, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
119 } else if (pkt
->cmd
== MemCmd::StoreCondReq
) {
120 pkt
->cmd
= MemCmd::StoreCondFailReq
;
121 DPRINTF(Cache
, "Replacing StoreCondReq with StoreCondFailReq\n");
127 MSHR::TargetList::replaceUpgrades()
132 Iterator end_i
= end();
133 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
134 replaceUpgrade(i
->pkt
);
142 MSHR::TargetList::clearDownstreamPending()
144 Iterator end_i
= end();
145 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
146 if (i
->markedPending
) {
147 // Iterate over the SenderState stack and see if we find
148 // an MSHR entry. If we find one, clear the
149 // downstreamPending flag by calling
150 // clearDownstreamPending(). This recursively clears the
151 // downstreamPending flag in all caches this packet has
153 MSHR
*mshr
= i
->pkt
->findNextSenderState
<MSHR
>();
155 mshr
->clearDownstreamPending();
163 MSHR::TargetList::checkFunctional(PacketPtr pkt
)
165 Iterator end_i
= end();
166 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
167 if (pkt
->checkFunctional(i
->pkt
)) {
178 print(std::ostream
&os
, int verbosity
, const std::string
&prefix
) const
180 ConstIterator end_i
= end();
181 for (ConstIterator i
= begin(); i
!= end_i
; ++i
) {
184 case Target::FromCPU
:
187 case Target::FromSnoop
:
190 case Target::FromPrefetcher
:
191 s
= "FromPrefetcher";
197 ccprintf(os
, "%s%s: ", prefix
, s
);
198 i
->pkt
->print(os
, verbosity
, "");
204 MSHR::allocate(Addr _addr
, int _size
, PacketPtr target
,
205 Tick whenReady
, Counter _order
)
209 readyTime
= whenReady
;
213 _isUncacheable
= target
->req
->isUncacheable();
215 downstreamPending
= false;
217 assert(targets
.isReset());
218 // Don't know of a case where we would allocate a new MSHR for a
219 // snoop (mem-side request), so set source according to request here
220 Target::Source source
= (target
->cmd
== MemCmd::HardPFReq
) ?
221 Target::FromPrefetcher
: Target::FromCPU
;
222 targets
.add(target
, whenReady
, _order
, source
, true);
223 assert(deferredTargets
.isReset());
229 MSHR::clearDownstreamPending()
231 assert(downstreamPending
);
232 downstreamPending
= false;
233 // recursively clear flag on any MSHRs we will be forwarding
235 targets
.clearDownstreamPending();
239 MSHR::markInService(PacketPtr pkt
)
242 if (isForwardNoResponse()) {
243 // we just forwarded the request packet & don't expect a
244 // response, so get rid of it
245 assert(getNumTargets() == 1);
250 pendingDirty
= (targets
.needsExclusive
||
251 (!pkt
->sharedAsserted() && pkt
->memInhibitAsserted()));
252 postInvalidate
= postDowngrade
= false;
254 if (!downstreamPending
) {
255 // let upstream caches know that the request has made it to a
256 // level where it's going to get a response
257 targets
.clearDownstreamPending();
266 assert(targets
.empty());
267 targets
.resetFlags();
268 assert(deferredTargets
.isReset());
273 * Adds a target to an MSHR
276 MSHR::allocateTarget(PacketPtr pkt
, Tick whenReady
, Counter _order
)
278 // if there's a request already in service for this MSHR, we will
279 // have to defer the new target until after the response if any of
280 // the following are true:
281 // - there are other targets already deferred
282 // - there's a pending invalidate to be applied after the response
283 // comes back (but before this target is processed)
284 // - this target requires an exclusive block and either we're not
285 // getting an exclusive block back or we have already snooped
286 // another read request that will downgrade our exclusive block
289 // assume we'd never issue a prefetch when we've got an
291 assert(pkt
->cmd
!= MemCmd::HardPFReq
);
294 (!deferredTargets
.empty() || hasPostInvalidate() ||
295 (pkt
->needsExclusive() &&
296 (!isPendingDirty() || hasPostDowngrade() || isForward
)))) {
297 // need to put on deferred list
298 if (hasPostInvalidate())
300 deferredTargets
.add(pkt
, whenReady
, _order
, Target::FromCPU
, true);
302 // No request outstanding, or still OK to append to
303 // outstanding request: append to regular target list. Only
304 // mark pending if current request hasn't been issued yet
305 // (isn't in service).
306 targets
.add(pkt
, whenReady
, _order
, Target::FromCPU
, !inService
);
311 MSHR::handleSnoop(PacketPtr pkt
, Counter _order
)
313 DPRINTF(Cache
, "%s for %s address %x size %d\n", __func__
,
314 pkt
->cmdString(), pkt
->getAddr(), pkt
->getSize());
315 if (!inService
|| (pkt
->isExpressSnoop() && downstreamPending
)) {
316 // Request has not been issued yet, or it's been issued
317 // locally but is buffered unissued at some downstream cache
318 // which is forwarding us this snoop. Either way, the packet
319 // we're snooping logically precedes this MSHR's request, so
320 // the snoop has no impact on the MSHR, but must be processed
321 // in the standard way by the cache. The only exception is
322 // that if we're an L2+ cache buffering an UpgradeReq from a
323 // higher-level cache, and the snoop is invalidating, then our
324 // buffered upgrades must be converted to read exclusives,
325 // since the upper-level cache no longer has a valid copy.
326 // That is, even though the upper-level cache got out on its
327 // local bus first, some other invalidating transaction
328 // reached the global bus before the upgrade did.
329 if (pkt
->needsExclusive()) {
330 targets
.replaceUpgrades();
331 deferredTargets
.replaceUpgrades();
337 // From here on down, the request issued by this MSHR logically
338 // precedes the request we're snooping.
339 if (pkt
->needsExclusive()) {
340 // snooped request still precedes the re-request we'll have to
341 // issue for deferred targets, if any...
342 deferredTargets
.replaceUpgrades();
345 if (hasPostInvalidate()) {
346 // a prior snoop has already appended an invalidation, so
347 // logically we don't have the block anymore; no need for
352 if (isPendingDirty() || pkt
->isInvalidate()) {
353 // We need to save and replay the packet in two cases:
354 // 1. We're awaiting an exclusive copy, so ownership is pending,
355 // and we need to respond after we receive data.
356 // 2. It's an invalidation (e.g., UpgradeReq), and we need
357 // to forward the snoop up the hierarchy after the current
358 // transaction completes.
360 // Actual target device (typ. a memory) will delete the
361 // packet on reception, so we need to save a copy here.
362 PacketPtr cp_pkt
= new Packet(pkt
, true);
363 targets
.add(cp_pkt
, curTick(), _order
, Target::FromSnoop
,
364 downstreamPending
&& targets
.needsExclusive
);
366 if (isPendingDirty()) {
367 pkt
->assertMemInhibit();
368 pkt
->setSupplyExclusive();
371 if (pkt
->needsExclusive()) {
372 // This transaction will take away our pending copy
373 postInvalidate
= true;
377 if (!pkt
->needsExclusive()) {
378 // This transaction will get a read-shared copy, downgrading
379 // our copy if we had an exclusive one
380 postDowngrade
= true;
389 MSHR::promoteDeferredTargets()
391 assert(targets
.empty());
392 if (deferredTargets
.empty()) {
396 // swap targets & deferredTargets lists
397 std::swap(targets
, deferredTargets
);
399 // clear deferredTargets flags
400 deferredTargets
.resetFlags();
402 order
= targets
.front().order
;
403 readyTime
= std::max(curTick(), targets
.front().readyTime
);
410 MSHR::handleFill(Packet
*pkt
, CacheBlk
*blk
)
412 if (!pkt
->sharedAsserted()
413 && !(hasPostInvalidate() || hasPostDowngrade())
414 && deferredTargets
.needsExclusive
) {
415 // We got an exclusive response, but we have deferred targets
416 // which are waiting to request an exclusive copy (not because
417 // of a pending invalidate). This can happen if the original
418 // request was for a read-only (non-exclusive) block, but we
419 // got an exclusive copy anyway because of the E part of the
420 // MOESI/MESI protocol. Since we got the exclusive copy
421 // there's no need to defer the targets, so move them up to
422 // the regular target list.
423 assert(!targets
.needsExclusive
);
424 targets
.needsExclusive
= true;
425 // if any of the deferred targets were upper-level cache
426 // requests marked downstreamPending, need to clear that
427 assert(!downstreamPending
); // not pending here anymore
428 deferredTargets
.clearDownstreamPending();
429 // this clears out deferredTargets too
430 targets
.splice(targets
.end(), deferredTargets
);
431 deferredTargets
.resetFlags();
437 MSHR::checkFunctional(PacketPtr pkt
)
439 // For printing, we treat the MSHR as a whole as single entity.
440 // For other requests, we iterate over the individual targets
441 // since that's where the actual data lies.
442 if (pkt
->isPrint()) {
443 pkt
->checkFunctional(this, addr
, size
, NULL
);
446 return (targets
.checkFunctional(pkt
) ||
447 deferredTargets
.checkFunctional(pkt
));
453 MSHR::print(std::ostream
&os
, int verbosity
, const std::string
&prefix
) const
455 ccprintf(os
, "%s[%x:%x] %s %s %s state: %s %s %s %s %s\n",
456 prefix
, addr
, addr
+size
-1,
457 isForward
? "Forward" : "",
458 isForwardNoResponse() ? "ForwNoResp" : "",
459 needsExclusive() ? "Excl" : "",
460 _isUncacheable
? "Unc" : "",
461 inService
? "InSvc" : "",
462 downstreamPending
? "DwnPend" : "",
463 hasPostInvalidate() ? "PostInv" : "",
464 hasPostDowngrade() ? "PostDowngr" : "");
466 ccprintf(os
, "%s Targets:\n", prefix
);
467 targets
.print(os
, verbosity
, prefix
+ " ");
468 if (!deferredTargets
.empty()) {
469 ccprintf(os
, "%s Deferred Targets:\n", prefix
);
470 deferredTargets
.print(os
, verbosity
, prefix
+ " ");