mem: Spring cleaning of MSHR and MSHRQueue
[gem5.git] / src / mem / cache / mshr.cc
1 /*
2 * Copyright (c) 2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 */
44
45 /**
46 * @file
47 * Miss Status and Handling Register (MSHR) definitions.
48 */
49
50 #include <algorithm>
51 #include <cassert>
52 #include <string>
53 #include <vector>
54
55 #include "base/misc.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "mem/cache/cache.hh"
59 #include "mem/cache/mshr.hh"
60 #include "sim/core.hh"
61
62 using namespace std;
63
// Default-construct an idle MSHR.  All state is quiescent; the real
// per-miss state is established later by allocate().
MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
               pendingDirty(false), postInvalidate(false),
               postDowngrade(false), queue(NULL), order(0), addr(0), size(0),
               inService(false), isForward(false), threadNum(InvalidThreadID),
               data(NULL)
{
}
71
72
// An empty target list starts with both aggregate flags cleared; they
// are set as targets are added via add().
MSHR::TargetList::TargetList()
    : needsExclusive(false), hasUpgrade(false)
{}
76
77
78 inline void
79 MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
80 Counter order, Target::Source source, bool markPending)
81 {
82 if (source != Target::FromSnoop) {
83 if (pkt->needsExclusive()) {
84 needsExclusive = true;
85 }
86
87 // StoreCondReq is effectively an upgrade if it's in an MSHR
88 // since it would have been failed already if we didn't have a
89 // read-only copy
90 if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
91 hasUpgrade = true;
92 }
93 }
94
95 if (markPending) {
96 // Iterate over the SenderState stack and see if we find
97 // an MSHR entry. If we do, set the downstreamPending
98 // flag. Otherwise, do nothing.
99 MSHR *mshr = pkt->findNextSenderState<MSHR>();
100 if (mshr != NULL) {
101 assert(!mshr->downstreamPending);
102 mshr->downstreamPending = true;
103 }
104 }
105
106 push_back(Target(pkt, readyTime, order, source, markPending));
107 }
108
109
110 static void
111 replaceUpgrade(PacketPtr pkt)
112 {
113 if (pkt->cmd == MemCmd::UpgradeReq) {
114 pkt->cmd = MemCmd::ReadExReq;
115 DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
116 } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
117 pkt->cmd = MemCmd::SCUpgradeFailReq;
118 DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
119 } else if (pkt->cmd == MemCmd::StoreCondReq) {
120 pkt->cmd = MemCmd::StoreCondFailReq;
121 DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
122 }
123 }
124
125
126 void
127 MSHR::TargetList::replaceUpgrades()
128 {
129 if (!hasUpgrade)
130 return;
131
132 Iterator end_i = end();
133 for (Iterator i = begin(); i != end_i; ++i) {
134 replaceUpgrade(i->pkt);
135 }
136
137 hasUpgrade = false;
138 }
139
140
141 void
142 MSHR::TargetList::clearDownstreamPending()
143 {
144 Iterator end_i = end();
145 for (Iterator i = begin(); i != end_i; ++i) {
146 if (i->markedPending) {
147 // Iterate over the SenderState stack and see if we find
148 // an MSHR entry. If we find one, clear the
149 // downstreamPending flag by calling
150 // clearDownstreamPending(). This recursively clears the
151 // downstreamPending flag in all caches this packet has
152 // passed through.
153 MSHR *mshr = i->pkt->findNextSenderState<MSHR>();
154 if (mshr != NULL) {
155 mshr->clearDownstreamPending();
156 }
157 }
158 }
159 }
160
161
162 bool
163 MSHR::TargetList::checkFunctional(PacketPtr pkt)
164 {
165 Iterator end_i = end();
166 for (Iterator i = begin(); i != end_i; ++i) {
167 if (pkt->checkFunctional(i->pkt)) {
168 return true;
169 }
170 }
171
172 return false;
173 }
174
175
176 void
177 MSHR::TargetList::
178 print(std::ostream &os, int verbosity, const std::string &prefix) const
179 {
180 ConstIterator end_i = end();
181 for (ConstIterator i = begin(); i != end_i; ++i) {
182 const char *s;
183 switch (i->source) {
184 case Target::FromCPU:
185 s = "FromCPU";
186 break;
187 case Target::FromSnoop:
188 s = "FromSnoop";
189 break;
190 case Target::FromPrefetcher:
191 s = "FromPrefetcher";
192 break;
193 default:
194 s = "";
195 break;
196 }
197 ccprintf(os, "%s%s: ", prefix, s);
198 i->pkt->print(os, verbosity, "");
199 }
200 }
201
202
203 void
204 MSHR::allocate(Addr _addr, int _size, PacketPtr target,
205 Tick whenReady, Counter _order)
206 {
207 addr = _addr;
208 size = _size;
209 readyTime = whenReady;
210 order = _order;
211 assert(target);
212 isForward = false;
213 _isUncacheable = target->req->isUncacheable();
214 inService = false;
215 downstreamPending = false;
216 threadNum = 0;
217 assert(targets.isReset());
218 // Don't know of a case where we would allocate a new MSHR for a
219 // snoop (mem-side request), so set source according to request here
220 Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
221 Target::FromPrefetcher : Target::FromCPU;
222 targets.add(target, whenReady, _order, source, true);
223 assert(deferredTargets.isReset());
224 data = NULL;
225 }
226
227
// Clear this entry's downstreamPending flag; must only be called while
// the flag is actually set.
void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets.clearDownstreamPending();
}
237
238 bool
239 MSHR::markInService(PacketPtr pkt)
240 {
241 assert(!inService);
242 if (isForwardNoResponse()) {
243 // we just forwarded the request packet & don't expect a
244 // response, so get rid of it
245 assert(getNumTargets() == 1);
246 popTarget();
247 return true;
248 }
249 inService = true;
250 pendingDirty = (targets.needsExclusive ||
251 (!pkt->sharedAsserted() && pkt->memInhibitAsserted()));
252 postInvalidate = postDowngrade = false;
253
254 if (!downstreamPending) {
255 // let upstream caches know that the request has made it to a
256 // level where it's going to get a response
257 targets.clearDownstreamPending();
258 }
259 return false;
260 }
261
262
// Return this entry to its quiescent state.  All targets (regular and
// deferred) must already have been serviced.
void
MSHR::deallocate()
{
    assert(targets.empty());
    targets.resetFlags();
    assert(deferredTargets.isReset());
    inService = false;
}
271
272 /*
273 * Adds a target to an MSHR
274 */
275 void
276 MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
277 {
278 // if there's a request already in service for this MSHR, we will
279 // have to defer the new target until after the response if any of
280 // the following are true:
281 // - there are other targets already deferred
282 // - there's a pending invalidate to be applied after the response
283 // comes back (but before this target is processed)
284 // - this target requires an exclusive block and either we're not
285 // getting an exclusive block back or we have already snooped
286 // another read request that will downgrade our exclusive block
287 // to shared
288
289 // assume we'd never issue a prefetch when we've got an
290 // outstanding miss
291 assert(pkt->cmd != MemCmd::HardPFReq);
292
293 if (inService &&
294 (!deferredTargets.empty() || hasPostInvalidate() ||
295 (pkt->needsExclusive() &&
296 (!isPendingDirty() || hasPostDowngrade() || isForward)))) {
297 // need to put on deferred list
298 if (hasPostInvalidate())
299 replaceUpgrade(pkt);
300 deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true);
301 } else {
302 // No request outstanding, or still OK to append to
303 // outstanding request: append to regular target list. Only
304 // mark pending if current request hasn't been issued yet
305 // (isn't in service).
306 targets.add(pkt, whenReady, _order, Target::FromCPU, !inService);
307 }
308 }
309
// Apply an incoming snoop to this MSHR.  Returns false when the snoop
// logically precedes the MSHR's request and must still be handled by
// the cache in the normal way; returns true when the MSHR has absorbed
// the snoop (possibly recording a pending invalidate/downgrade and
// queueing a copy of the packet for replay after the response).
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsExclusive()) {
            targets.replaceUpgrades();
            deferredTargets.replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsExclusive()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets.replaceUpgrades();
    }

    if (hasPostInvalidate()) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (isPendingDirty() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting an exclusive copy, so ownership is pending,
        //    and we need to respond after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Actual target device (typ. a memory) will delete the
        // packet on reception, so we need to save a copy here.
        PacketPtr cp_pkt = new Packet(pkt, true);
        // Only mark the copy pending if our own response is still
        // pending below us and some target needs exclusive access.
        targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                    downstreamPending && targets.needsExclusive);

        if (isPendingDirty()) {
            // We will own the block: inhibit memory's response and
            // promise to supply an exclusive copy ourselves.
            pkt->assertMemInhibit();
            pkt->setSupplyExclusive();
        }

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }
    }

    if (!pkt->needsExclusive()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had an exclusive one
        postDowngrade = true;
        pkt->assertShared();
    }

    return true;
}
386
387
388 bool
389 MSHR::promoteDeferredTargets()
390 {
391 assert(targets.empty());
392 if (deferredTargets.empty()) {
393 return false;
394 }
395
396 // swap targets & deferredTargets lists
397 std::swap(targets, deferredTargets);
398
399 // clear deferredTargets flags
400 deferredTargets.resetFlags();
401
402 order = targets.front().order;
403 readyTime = std::max(curTick(), targets.front().readyTime);
404
405 return true;
406 }
407
408
409 void
410 MSHR::handleFill(Packet *pkt, CacheBlk *blk)
411 {
412 if (!pkt->sharedAsserted()
413 && !(hasPostInvalidate() || hasPostDowngrade())
414 && deferredTargets.needsExclusive) {
415 // We got an exclusive response, but we have deferred targets
416 // which are waiting to request an exclusive copy (not because
417 // of a pending invalidate). This can happen if the original
418 // request was for a read-only (non-exclusive) block, but we
419 // got an exclusive copy anyway because of the E part of the
420 // MOESI/MESI protocol. Since we got the exclusive copy
421 // there's no need to defer the targets, so move them up to
422 // the regular target list.
423 assert(!targets.needsExclusive);
424 targets.needsExclusive = true;
425 // if any of the deferred targets were upper-level cache
426 // requests marked downstreamPending, need to clear that
427 assert(!downstreamPending); // not pending here anymore
428 deferredTargets.clearDownstreamPending();
429 // this clears out deferredTargets too
430 targets.splice(targets.end(), deferredTargets);
431 deferredTargets.resetFlags();
432 }
433 }
434
435
436 bool
437 MSHR::checkFunctional(PacketPtr pkt)
438 {
439 // For printing, we treat the MSHR as a whole as single entity.
440 // For other requests, we iterate over the individual targets
441 // since that's where the actual data lies.
442 if (pkt->isPrint()) {
443 pkt->checkFunctional(this, addr, size, NULL);
444 return false;
445 } else {
446 return (targets.checkFunctional(pkt) ||
447 deferredTargets.checkFunctional(pkt));
448 }
449 }
450
451
// Dump a one-line summary of this MSHR's state followed by its regular
// (and, when present, deferred) target lists.
void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s %s\n",
             prefix, addr, addr+size-1,
             isForward ? "Forward" : "",
             isForwardNoResponse() ? "ForwNoResp" : "",
             needsExclusive() ? "Excl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             hasPostInvalidate() ? "PostInv" : "",
             hasPostDowngrade() ? "PostDowngr" : "");

    ccprintf(os, "%s  Targets:\n", prefix);
    targets.print(os, verbosity, prefix + "    ");
    // Deferred targets are only shown when there are any.
    if (!deferredTargets.empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets.print(os, verbosity, prefix + "      ");
    }
}
473
474 std::string
475 MSHR::print() const
476 {
477 ostringstream str;
478 print(str);
479 return str.str();
480 }