/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */
#include <algorithm>
#include <string>

#include "mem/cache/mshr.hh"

#include "base/misc.hh"
#include "mem/cache/cache.hh"
#include "sim/core.hh" // for curTick
#include "sim/host.hh"
55 targets
= new TargetList();
56 deferredTargets
= new TargetList();
60 MSHR::TargetList::TargetList()
61 : needsExclusive(false), hasUpgrade(false)
66 MSHR::TargetList::add(PacketPtr pkt
, Tick readyTime
,
67 Counter order
, Target::Source source
, bool markPending
)
69 if (source
!= Target::FromSnoop
) {
70 if (pkt
->needsExclusive()) {
71 needsExclusive
= true;
74 if (pkt
->cmd
== MemCmd::UpgradeReq
) {
80 MSHR
*mshr
= dynamic_cast<MSHR
*>(pkt
->senderState
);
82 assert(!mshr
->downstreamPending
);
83 mshr
->downstreamPending
= true;
87 push_back(Target(pkt
, readyTime
, order
, source
, markPending
));
92 MSHR::TargetList::replaceUpgrades()
97 Iterator end_i
= end();
98 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
99 if (i
->pkt
->cmd
== MemCmd::UpgradeReq
) {
100 i
->pkt
->cmd
= MemCmd::ReadExReq
;
101 DPRINTF(Cache
, "Replacing UpgradeReq with ReadExReq\n");
110 MSHR::TargetList::clearDownstreamPending()
112 Iterator end_i
= end();
113 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
114 if (i
->markedPending
) {
115 MSHR
*mshr
= dynamic_cast<MSHR
*>(i
->pkt
->senderState
);
117 mshr
->clearDownstreamPending();
125 MSHR::TargetList::checkFunctional(PacketPtr pkt
)
127 Iterator end_i
= end();
128 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
129 if (pkt
->checkFunctional(i
->pkt
)) {
140 print(std::ostream
&os
, int verbosity
, const std::string
&prefix
) const
142 ConstIterator end_i
= end();
143 for (ConstIterator i
= begin(); i
!= end_i
; ++i
) {
146 case Target::FromCPU
: s
= "FromCPU";
147 case Target::FromSnoop
: s
= "FromSnoop";
148 case Target::FromPrefetcher
: s
= "FromPrefetcher";
151 ccprintf(os
, "%s%s: ", prefix
, s
);
152 i
->pkt
->print(os
, verbosity
, "");
158 MSHR::allocate(Addr _addr
, int _size
, PacketPtr target
,
159 Tick whenReady
, Counter _order
)
163 readyTime
= whenReady
;
167 _isUncacheable
= target
->req
->isUncacheable();
169 downstreamPending
= false;
172 assert(targets
->isReset());
173 // Don't know of a case where we would allocate a new MSHR for a
174 // snoop (mem-side request), so set source according to request here
175 Target::Source source
= (target
->cmd
== MemCmd::HardPFReq
) ?
176 Target::FromPrefetcher
: Target::FromCPU
;
177 targets
->add(target
, whenReady
, _order
, source
, true);
178 assert(deferredTargets
->isReset());
179 pendingInvalidate
= false;
180 pendingShared
= false;
186 MSHR::clearDownstreamPending()
188 assert(downstreamPending
);
189 downstreamPending
= false;
190 // recursively clear flag on any MSHRs we will be forwarding
192 targets
->clearDownstreamPending();
196 MSHR::markInService()
199 if (isForwardNoResponse()) {
200 // we just forwarded the request packet & don't expect a
201 // response, so get rid of it
202 assert(getNumTargets() == 1);
207 if (!downstreamPending
) {
208 // let upstream caches know that the request has made it to a
209 // level where it's going to get a response
210 targets
->clearDownstreamPending();
219 assert(targets
->empty());
220 targets
->resetFlags();
221 assert(deferredTargets
->isReset());
222 assert(ntargets
== 0);
229 * Adds a target to an MSHR
232 MSHR::allocateTarget(PacketPtr pkt
, Tick whenReady
, Counter _order
)
234 // if there's a request already in service for this MSHR, we will
235 // have to defer the new target until after the response if any of
236 // the following are true:
237 // - there are other targets already deferred
238 // - there's a pending invalidate to be applied after the response
239 // comes back (but before this target is processed)
240 // - the outstanding request is for a non-exclusive block and this
241 // target requires an exclusive block
243 // assume we'd never issue a prefetch when we've got an
245 assert(pkt
->cmd
!= MemCmd::HardPFReq
);
248 (!deferredTargets
->empty() || pendingInvalidate
||
249 (!targets
->needsExclusive
&& pkt
->needsExclusive()))) {
250 // need to put on deferred list
251 deferredTargets
->add(pkt
, whenReady
, _order
, Target::FromCPU
, true);
253 // No request outstanding, or still OK to append to
254 // outstanding request: append to regular target list. Only
255 // mark pending if current request hasn't been issued yet
256 // (isn't in service).
257 targets
->add(pkt
, whenReady
, _order
, Target::FromCPU
, !inService
);
264 MSHR::handleSnoop(PacketPtr pkt
, Counter _order
)
266 if (!inService
|| (pkt
->isExpressSnoop() && downstreamPending
)) {
267 // Request has not been issued yet, or it's been issued
268 // locally but is buffered unissued at some downstream cache
269 // which is forwarding us this snoop. Either way, the packet
270 // we're snooping logically precedes this MSHR's request, so
271 // the snoop has no impact on the MSHR, but must be processed
272 // in the standard way by the cache. The only exception is
273 // that if we're an L2+ cache buffering an UpgradeReq from a
274 // higher-level cache, and the snoop is invalidating, then our
275 // buffered upgrades must be converted to read exclusives,
276 // since the upper-level cache no longer has a valid copy.
277 // That is, even though the upper-level cache got out on its
278 // local bus first, some other invalidating transaction
279 // reached the global bus before the upgrade did.
280 if (pkt
->needsExclusive()) {
281 targets
->replaceUpgrades();
282 deferredTargets
->replaceUpgrades();
288 // From here on down, the request issued by this MSHR logically
289 // precedes the request we're snooping.
291 if (pkt
->needsExclusive()) {
292 // snooped request still precedes the re-request we'll have to
293 // issue for deferred targets, if any...
294 deferredTargets
->replaceUpgrades();
297 if (pendingInvalidate
) {
298 // a prior snoop has already appended an invalidation, so
299 // logically we don't have the block anymore; no need for
304 if (targets
->needsExclusive
|| pkt
->needsExclusive()) {
305 // actual target device (typ. PhysicalMemory) will delete the
306 // packet on reception, so we need to save a copy here
307 PacketPtr cp_pkt
= new Packet(pkt
, true);
308 targets
->add(cp_pkt
, curTick
, _order
, Target::FromSnoop
,
309 downstreamPending
&& targets
->needsExclusive
);
312 if (targets
->needsExclusive
) {
313 // We're awaiting an exclusive copy, so ownership is pending.
314 // It's up to us to respond once the data arrives.
315 pkt
->assertMemInhibit();
316 pkt
->setSupplyExclusive();
318 // Someone else may respond before we get around to
319 // processing this snoop, which means the copied request
320 // pointer will no longer be valid
324 if (pkt
->needsExclusive()) {
325 // This transaction will take away our pending copy
326 pendingInvalidate
= true;
329 // Read to a read: no conflict, so no need to record as
330 // target, but make sure neither reader thinks he's getting an
332 pendingShared
= true;
341 MSHR::promoteDeferredTargets()
343 assert(targets
->empty());
344 if (deferredTargets
->empty()) {
348 // swap targets & deferredTargets lists
349 TargetList
*tmp
= targets
;
350 targets
= deferredTargets
;
351 deferredTargets
= tmp
;
353 assert(targets
->size() == ntargets
);
355 // clear deferredTargets flags
356 deferredTargets
->resetFlags();
358 pendingInvalidate
= false;
359 pendingShared
= false;
360 order
= targets
->front().order
;
361 readyTime
= std::max(curTick
, targets
->front().readyTime
);
368 MSHR::handleFill(Packet
*pkt
, CacheBlk
*blk
)
371 // we snooped another read while this read was in
372 // service... assert shared line on its behalf
376 if (!pkt
->sharedAsserted() && !pendingInvalidate
377 && deferredTargets
->needsExclusive
) {
378 // We got an exclusive response, but we have deferred targets
379 // which are waiting to request an exclusive copy (not because
380 // of a pending invalidate). This can happen if the original
381 // request was for a read-only (non-exclusive) block, but we
382 // got an exclusive copy anyway because of the E part of the
383 // MOESI/MESI protocol. Since we got the exclusive copy
384 // there's no need to defer the targets, so move them up to
385 // the regular target list.
386 assert(!targets
->needsExclusive
);
387 targets
->needsExclusive
= true;
388 // if any of the deferred targets were upper-level cache
389 // requests marked downstreamPending, need to clear that
390 assert(!downstreamPending
); // not pending here anymore
391 deferredTargets
->clearDownstreamPending();
392 // this clears out deferredTargets too
393 targets
->splice(targets
->end(), *deferredTargets
);
394 deferredTargets
->resetFlags();
400 MSHR::checkFunctional(PacketPtr pkt
)
402 // For printing, we treat the MSHR as a whole as single entity.
403 // For other requests, we iterate over the individual targets
404 // since that's where the actual data lies.
405 if (pkt
->isPrint()) {
406 pkt
->checkFunctional(this, addr
, size
, NULL
);
409 return (targets
->checkFunctional(pkt
) ||
410 deferredTargets
->checkFunctional(pkt
));
416 MSHR::print(std::ostream
&os
, int verbosity
, const std::string
&prefix
) const
418 ccprintf(os
, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
419 prefix
, addr
, addr
+size
-1,
420 isForward
? "Forward" : "",
421 isForwardNoResponse() ? "ForwNoResp" : "",
422 needsExclusive() ? "Excl" : "",
423 _isUncacheable
? "Unc" : "",
424 inService
? "InSvc" : "",
425 downstreamPending
? "DwnPend" : "",
426 pendingInvalidate
? "PendInv" : "",
427 pendingShared
? "PendShared" : "");
429 ccprintf(os
, "%s Targets:\n", prefix
);
430 targets
->print(os
, verbosity
, prefix
+ " ");
431 if (!deferredTargets
->empty()) {
432 ccprintf(os
, "%s Deferred Targets:\n", prefix
);
433 deferredTargets
->print(os
, verbosity
, prefix
+ " ");