2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * Authors: Erik Hallnor
35 * Miss Status and Handling Register (MSHR) definitions.
43 #include "base/misc.hh"
44 #include "base/types.hh"
45 #include "mem/cache/cache.hh"
46 #include "mem/cache/mshr.hh"
47 #include "sim/core.hh"
55 threadNum
= InvalidThreadID
;
56 targets
= new TargetList();
57 deferredTargets
= new TargetList();
61 MSHR::TargetList::TargetList()
62 : needsExclusive(false), hasUpgrade(false)
67 MSHR::TargetList::add(PacketPtr pkt
, Tick readyTime
,
68 Counter order
, Target::Source source
, bool markPending
)
70 if (source
!= Target::FromSnoop
) {
71 if (pkt
->needsExclusive()) {
72 needsExclusive
= true;
75 if (pkt
->isUpgrade()) {
81 MSHR
*mshr
= dynamic_cast<MSHR
*>(pkt
->senderState
);
83 assert(!mshr
->downstreamPending
);
84 mshr
->downstreamPending
= true;
88 push_back(Target(pkt
, readyTime
, order
, source
, markPending
));
93 MSHR::TargetList::replaceUpgrades()
98 Iterator end_i
= end();
99 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
100 if (i
->pkt
->cmd
== MemCmd::UpgradeReq
) {
101 i
->pkt
->cmd
= MemCmd::ReadExReq
;
102 DPRINTF(Cache
, "Replacing UpgradeReq with ReadExReq\n");
103 } else if (i
->pkt
->cmd
== MemCmd::SCUpgradeReq
) {
104 i
->pkt
->cmd
= MemCmd::SCUpgradeFailReq
;
105 DPRINTF(Cache
, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
114 MSHR::TargetList::clearDownstreamPending()
116 Iterator end_i
= end();
117 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
118 if (i
->markedPending
) {
119 MSHR
*mshr
= dynamic_cast<MSHR
*>(i
->pkt
->senderState
);
121 mshr
->clearDownstreamPending();
129 MSHR::TargetList::checkFunctional(PacketPtr pkt
)
131 Iterator end_i
= end();
132 for (Iterator i
= begin(); i
!= end_i
; ++i
) {
133 if (pkt
->checkFunctional(i
->pkt
)) {
144 print(std::ostream
&os
, int verbosity
, const std::string
&prefix
) const
146 ConstIterator end_i
= end();
147 for (ConstIterator i
= begin(); i
!= end_i
; ++i
) {
150 case Target::FromCPU
: s
= "FromCPU";
151 case Target::FromSnoop
: s
= "FromSnoop";
152 case Target::FromPrefetcher
: s
= "FromPrefetcher";
155 ccprintf(os
, "%s%s: ", prefix
, s
);
156 i
->pkt
->print(os
, verbosity
, "");
162 MSHR::allocate(Addr _addr
, int _size
, PacketPtr target
,
163 Tick whenReady
, Counter _order
)
167 readyTime
= whenReady
;
171 _isUncacheable
= target
->req
->isUncacheable();
173 downstreamPending
= false;
176 assert(targets
->isReset());
177 // Don't know of a case where we would allocate a new MSHR for a
178 // snoop (mem-side request), so set source according to request here
179 Target::Source source
= (target
->cmd
== MemCmd::HardPFReq
) ?
180 Target::FromPrefetcher
: Target::FromCPU
;
181 targets
->add(target
, whenReady
, _order
, source
, true);
182 assert(deferredTargets
->isReset());
183 pendingInvalidate
= false;
184 pendingShared
= false;
190 MSHR::clearDownstreamPending()
192 assert(downstreamPending
);
193 downstreamPending
= false;
194 // recursively clear flag on any MSHRs we will be forwarding
196 targets
->clearDownstreamPending();
200 MSHR::markInService()
203 if (isForwardNoResponse()) {
204 // we just forwarded the request packet & don't expect a
205 // response, so get rid of it
206 assert(getNumTargets() == 1);
211 if (!downstreamPending
) {
212 // let upstream caches know that the request has made it to a
213 // level where it's going to get a response
214 targets
->clearDownstreamPending();
223 assert(targets
->empty());
224 targets
->resetFlags();
225 assert(deferredTargets
->isReset());
226 assert(ntargets
== 0);
233 * Adds a target to an MSHR
236 MSHR::allocateTarget(PacketPtr pkt
, Tick whenReady
, Counter _order
)
238 // if there's a request already in service for this MSHR, we will
239 // have to defer the new target until after the response if any of
240 // the following are true:
241 // - there are other targets already deferred
242 // - there's a pending invalidate to be applied after the response
243 // comes back (but before this target is processed)
244 // - the outstanding request is for a non-exclusive block and this
245 // target requires an exclusive block
247 // assume we'd never issue a prefetch when we've got an
249 assert(pkt
->cmd
!= MemCmd::HardPFReq
);
252 (!deferredTargets
->empty() || pendingInvalidate
||
253 (!targets
->needsExclusive
&& pkt
->needsExclusive()))) {
254 // need to put on deferred list
255 deferredTargets
->add(pkt
, whenReady
, _order
, Target::FromCPU
, true);
257 // No request outstanding, or still OK to append to
258 // outstanding request: append to regular target list. Only
259 // mark pending if current request hasn't been issued yet
260 // (isn't in service).
261 targets
->add(pkt
, whenReady
, _order
, Target::FromCPU
, !inService
);
268 MSHR::handleSnoop(PacketPtr pkt
, Counter _order
)
270 if (!inService
|| (pkt
->isExpressSnoop() && downstreamPending
)) {
271 // Request has not been issued yet, or it's been issued
272 // locally but is buffered unissued at some downstream cache
273 // which is forwarding us this snoop. Either way, the packet
274 // we're snooping logically precedes this MSHR's request, so
275 // the snoop has no impact on the MSHR, but must be processed
276 // in the standard way by the cache. The only exception is
277 // that if we're an L2+ cache buffering an UpgradeReq from a
278 // higher-level cache, and the snoop is invalidating, then our
279 // buffered upgrades must be converted to read exclusives,
280 // since the upper-level cache no longer has a valid copy.
281 // That is, even though the upper-level cache got out on its
282 // local bus first, some other invalidating transaction
283 // reached the global bus before the upgrade did.
284 if (pkt
->needsExclusive()) {
285 targets
->replaceUpgrades();
286 deferredTargets
->replaceUpgrades();
292 // From here on down, the request issued by this MSHR logically
293 // precedes the request we're snooping.
295 if (pkt
->needsExclusive()) {
296 // snooped request still precedes the re-request we'll have to
297 // issue for deferred targets, if any...
298 deferredTargets
->replaceUpgrades();
301 if (pendingInvalidate
) {
302 // a prior snoop has already appended an invalidation, so
303 // logically we don't have the block anymore; no need for
308 if (targets
->needsExclusive
|| pkt
->needsExclusive()) {
309 // actual target device (typ. PhysicalMemory) will delete the
310 // packet on reception, so we need to save a copy here
311 PacketPtr cp_pkt
= new Packet(pkt
, true);
312 targets
->add(cp_pkt
, curTick
, _order
, Target::FromSnoop
,
313 downstreamPending
&& targets
->needsExclusive
);
316 if (targets
->needsExclusive
) {
317 // We're awaiting an exclusive copy, so ownership is pending.
318 // It's up to us to respond once the data arrives.
319 pkt
->assertMemInhibit();
320 pkt
->setSupplyExclusive();
322 // Someone else may respond before we get around to
323 // processing this snoop, which means the copied request
324 // pointer will no longer be valid
328 if (pkt
->needsExclusive()) {
329 // This transaction will take away our pending copy
330 pendingInvalidate
= true;
333 // Read to a read: no conflict, so no need to record as
334 // target, but make sure neither reader thinks he's getting an
336 pendingShared
= true;
345 MSHR::promoteDeferredTargets()
347 assert(targets
->empty());
348 if (deferredTargets
->empty()) {
352 // swap targets & deferredTargets lists
353 TargetList
*tmp
= targets
;
354 targets
= deferredTargets
;
355 deferredTargets
= tmp
;
357 assert(targets
->size() == ntargets
);
359 // clear deferredTargets flags
360 deferredTargets
->resetFlags();
362 pendingInvalidate
= false;
363 pendingShared
= false;
364 order
= targets
->front().order
;
365 readyTime
= std::max(curTick
, targets
->front().readyTime
);
372 MSHR::handleFill(Packet
*pkt
, CacheBlk
*blk
)
375 // we snooped another read while this read was in
376 // service... assert shared line on its behalf
380 if (!pkt
->sharedAsserted() && !pendingInvalidate
381 && deferredTargets
->needsExclusive
) {
382 // We got an exclusive response, but we have deferred targets
383 // which are waiting to request an exclusive copy (not because
384 // of a pending invalidate). This can happen if the original
385 // request was for a read-only (non-exclusive) block, but we
386 // got an exclusive copy anyway because of the E part of the
387 // MOESI/MESI protocol. Since we got the exclusive copy
388 // there's no need to defer the targets, so move them up to
389 // the regular target list.
390 assert(!targets
->needsExclusive
);
391 targets
->needsExclusive
= true;
392 // if any of the deferred targets were upper-level cache
393 // requests marked downstreamPending, need to clear that
394 assert(!downstreamPending
); // not pending here anymore
395 deferredTargets
->clearDownstreamPending();
396 // this clears out deferredTargets too
397 targets
->splice(targets
->end(), *deferredTargets
);
398 deferredTargets
->resetFlags();
404 MSHR::checkFunctional(PacketPtr pkt
)
406 // For printing, we treat the MSHR as a whole as single entity.
407 // For other requests, we iterate over the individual targets
408 // since that's where the actual data lies.
409 if (pkt
->isPrint()) {
410 pkt
->checkFunctional(this, addr
, size
, NULL
);
413 return (targets
->checkFunctional(pkt
) ||
414 deferredTargets
->checkFunctional(pkt
));
420 MSHR::print(std::ostream
&os
, int verbosity
, const std::string
&prefix
) const
422 ccprintf(os
, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
423 prefix
, addr
, addr
+size
-1,
424 isForward
? "Forward" : "",
425 isForwardNoResponse() ? "ForwNoResp" : "",
426 needsExclusive() ? "Excl" : "",
427 _isUncacheable
? "Unc" : "",
428 inService
? "InSvc" : "",
429 downstreamPending
? "DwnPend" : "",
430 pendingInvalidate
? "PendInv" : "",
431 pendingShared
? "PendShared" : "");
433 ccprintf(os
, "%s Targets:\n", prefix
);
434 targets
->print(os
, verbosity
, prefix
+ " ");
435 if (!deferredTargets
->empty()) {
436 ccprintf(os
, "%s Deferred Targets:\n", prefix
);
437 deferredTargets
->print(os
, verbosity
, prefix
+ " ");