mem: have WriteInvalidate obsolete MSHRs
[gem5.git] / src / mem / cache / mshr.cc
1 /*
2 * Copyright (c) 2012-2013 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 */
44
45 /**
46 * @file
47 * Miss Status and Handling Register (MSHR) definitions.
48 */
49
50 #include <algorithm>
51 #include <cassert>
52 #include <string>
53 #include <vector>
54
55 #include "base/misc.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "mem/cache/cache.hh"
59 #include "mem/cache/mshr.hh"
60 #include "sim/core.hh"
61
62 using namespace std;
63
// Default-construct an MSHR in the fully-idle state: not uncacheable,
// nothing pending downstream, no post-response coherence actions queued,
// not obsolete, detached from any queue, and with no data buffer.
// Real initialization happens later in allocate().
MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
               pendingDirty(false), postInvalidate(false), postDowngrade(false),
               _isObsolete(false), queue(NULL), order(0), addr(0), size(0),
               isSecure(false), inService(false), isForward(false),
               threadNum(InvalidThreadID), data(NULL)
{
}
71
72
73 MSHR::TargetList::TargetList()
74 : needsExclusive(false), hasUpgrade(false)
75 {}
76
77
78 inline void
79 MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
80 Counter order, Target::Source source, bool markPending)
81 {
82 if (source != Target::FromSnoop) {
83 if (pkt->needsExclusive()) {
84 needsExclusive = true;
85 }
86
87 // StoreCondReq is effectively an upgrade if it's in an MSHR
88 // since it would have been failed already if we didn't have a
89 // read-only copy
90 if (pkt->isUpgrade() || pkt->cmd == MemCmd::StoreCondReq) {
91 hasUpgrade = true;
92 }
93 }
94
95 if (markPending) {
96 // Iterate over the SenderState stack and see if we find
97 // an MSHR entry. If we do, set the downstreamPending
98 // flag. Otherwise, do nothing.
99 MSHR *mshr = pkt->findNextSenderState<MSHR>();
100 if (mshr != NULL) {
101 assert(!mshr->downstreamPending);
102 mshr->downstreamPending = true;
103 }
104 }
105
106 push_back(Target(pkt, readyTime, order, source, markPending));
107 }
108
109
110 static void
111 replaceUpgrade(PacketPtr pkt)
112 {
113 if (pkt->cmd == MemCmd::UpgradeReq) {
114 pkt->cmd = MemCmd::ReadExReq;
115 DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
116 } else if (pkt->cmd == MemCmd::SCUpgradeReq) {
117 pkt->cmd = MemCmd::SCUpgradeFailReq;
118 DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
119 } else if (pkt->cmd == MemCmd::StoreCondReq) {
120 pkt->cmd = MemCmd::StoreCondFailReq;
121 DPRINTF(Cache, "Replacing StoreCondReq with StoreCondFailReq\n");
122 }
123 }
124
125
126 void
127 MSHR::TargetList::replaceUpgrades()
128 {
129 if (!hasUpgrade)
130 return;
131
132 Iterator end_i = end();
133 for (Iterator i = begin(); i != end_i; ++i) {
134 replaceUpgrade(i->pkt);
135 }
136
137 hasUpgrade = false;
138 }
139
140
141 void
142 MSHR::TargetList::clearDownstreamPending()
143 {
144 Iterator end_i = end();
145 for (Iterator i = begin(); i != end_i; ++i) {
146 if (i->markedPending) {
147 // Iterate over the SenderState stack and see if we find
148 // an MSHR entry. If we find one, clear the
149 // downstreamPending flag by calling
150 // clearDownstreamPending(). This recursively clears the
151 // downstreamPending flag in all caches this packet has
152 // passed through.
153 MSHR *mshr = i->pkt->findNextSenderState<MSHR>();
154 if (mshr != NULL) {
155 mshr->clearDownstreamPending();
156 }
157 }
158 }
159 }
160
161
162 bool
163 MSHR::TargetList::checkFunctional(PacketPtr pkt)
164 {
165 Iterator end_i = end();
166 for (Iterator i = begin(); i != end_i; ++i) {
167 if (pkt->checkFunctional(i->pkt)) {
168 return true;
169 }
170 }
171
172 return false;
173 }
174
175
176 void
177 MSHR::TargetList::
178 print(std::ostream &os, int verbosity, const std::string &prefix) const
179 {
180 ConstIterator end_i = end();
181 for (ConstIterator i = begin(); i != end_i; ++i) {
182 const char *s;
183 switch (i->source) {
184 case Target::FromCPU:
185 s = "FromCPU";
186 break;
187 case Target::FromSnoop:
188 s = "FromSnoop";
189 break;
190 case Target::FromPrefetcher:
191 s = "FromPrefetcher";
192 break;
193 default:
194 s = "";
195 break;
196 }
197 ccprintf(os, "%s%s: ", prefix, s);
198 i->pkt->print(os, verbosity, "");
199 }
200 }
201
202
203 void
204 MSHR::allocate(Addr _addr, int _size, PacketPtr target, Tick whenReady,
205 Counter _order)
206 {
207 addr = _addr;
208 size = _size;
209 isSecure = target->isSecure();
210 readyTime = whenReady;
211 order = _order;
212 assert(target);
213 isForward = false;
214 _isUncacheable = target->req->isUncacheable();
215 inService = false;
216 downstreamPending = false;
217 _isObsolete = false;
218 threadNum = 0;
219 assert(targets.isReset());
220 // Don't know of a case where we would allocate a new MSHR for a
221 // snoop (mem-side request), so set source according to request here
222 Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
223 Target::FromPrefetcher : Target::FromCPU;
224 targets.add(target, whenReady, _order, source, true);
225 assert(deferredTargets.isReset());
226 data = NULL;
227 }
228
229
// Clear this MSHR's downstreamPending flag and recursively propagate
// the clear to any upstream MSHRs reachable through our targets'
// sender-state stacks, so the whole chain learns a response is coming.
void
MSHR::clearDownstreamPending()
{
    // Must only be called while the flag is actually set.
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets.clearDownstreamPending();
}
239
// Mark this MSHR as having been sent downstream. Returns true if the
// MSHR is done (forward-with-no-response case) and can be deallocated
// immediately; false if it must stay allocated awaiting a response.
//
// @param pkt the request packet actually sent downstream (may only be
//            NULL in the forward-no-response case, which returns early)
bool
MSHR::markInService(PacketPtr pkt)
{
    assert(!inService);
    if (isForwardNoResponse()) {
        // we just forwarded the request packet & don't expect a
        // response, so get rid of it
        assert(getNumTargets() == 1);
        popTarget();
        return true;
    }

    assert(pkt != NULL);
    inService = true;
    // We expect to receive a dirty (writable) copy if any target asked
    // for exclusivity, or if another cache is supplying the line in
    // owned state (memInhibit asserted without shared asserted).
    pendingDirty = (targets.needsExclusive ||
                    (!pkt->sharedAsserted() && pkt->memInhibitAsserted()));
    // No coherence actions queued for after the response yet.
    postInvalidate = postDowngrade = false;

    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets.clearDownstreamPending();
    }
    return false;
}
265
266
// Return this MSHR to the free pool's idle state. Both target lists
// must already have been drained (all targets serviced or promoted).
void
MSHR::deallocate()
{
    assert(targets.empty());
    // Clear summary flags so the next allocate() sees a reset list.
    targets.resetFlags();
    assert(deferredTargets.isReset());
    inService = false;
}
275
276 /*
277 * Adds a target to an MSHR
278 */
279 void
280 MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
281 {
282 // if there's a request already in service for this MSHR, we will
283 // have to defer the new target until after the response if any of
284 // the following are true:
285 // - there are other targets already deferred
286 // - there's a pending invalidate to be applied after the response
287 // comes back (but before this target is processed)
288 // - this target requires an exclusive block and either we're not
289 // getting an exclusive block back or we have already snooped
290 // another read request that will downgrade our exclusive block
291 // to shared
292
293 // assume we'd never issue a prefetch when we've got an
294 // outstanding miss
295 assert(pkt->cmd != MemCmd::HardPFReq);
296
297 if (inService &&
298 (!deferredTargets.empty() || hasPostInvalidate() ||
299 (pkt->needsExclusive() &&
300 (!isPendingDirty() || hasPostDowngrade() || isForward)))) {
301 // need to put on deferred list
302 if (hasPostInvalidate())
303 replaceUpgrade(pkt);
304 deferredTargets.add(pkt, whenReady, _order, Target::FromCPU, true);
305 } else {
306 // No request outstanding, or still OK to append to
307 // outstanding request: append to regular target list. Only
308 // mark pending if current request hasn't been issued yet
309 // (isn't in service).
310 targets.add(pkt, whenReady, _order, Target::FromCPU, !inService);
311 }
312 }
313
// Apply an incoming snoop to this MSHR. Returns false if the snoop
// logically precedes our request (the cache must handle it normally),
// true if this MSHR has absorbed the snoop (by deferring a replay copy
// and/or recording post-response invalidate/downgrade actions).
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsExclusive()) {
            targets.replaceUpgrades();
            deferredTargets.replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.
    if (pkt->needsExclusive()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets.replaceUpgrades();
    }

    if (hasPostInvalidate()) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (isPendingDirty() || pkt->isInvalidate()) {
        // We need to save and replay the packet in two cases:
        // 1. We're awaiting an exclusive copy, so ownership is pending,
        //    and we need to respond after we receive data.
        // 2. It's an invalidation (e.g., UpgradeReq), and we need
        //    to forward the snoop up the hierarchy after the current
        //    transaction completes.

        // Actual target device (typ. a memory) will delete the
        // packet on reception, so we need to save a copy here.
        PacketPtr cp_pkt = new Packet(pkt, true);
        // Mark the deferred snoop pending only if our own request is
        // still buffered downstream and wants exclusivity.
        targets.add(cp_pkt, curTick(), _order, Target::FromSnoop,
                    downstreamPending && targets.needsExclusive);

        if (isPendingDirty()) {
            // We will own the line when our response arrives, so we
            // (not memory) must supply the data for this snoop.
            pkt->assertMemInhibit();
            pkt->setSupplyExclusive();
        }

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            postInvalidate = true;
        }
    }

    if (!pkt->needsExclusive()) {
        // This transaction will get a read-shared copy, downgrading
        // our copy if we had an exclusive one
        postDowngrade = true;
        pkt->assertShared();
    }

    return true;
}
390
391
392 bool
393 MSHR::promoteDeferredTargets()
394 {
395 assert(targets.empty());
396 if (deferredTargets.empty()) {
397 return false;
398 }
399
400 // swap targets & deferredTargets lists
401 std::swap(targets, deferredTargets);
402
403 // clear deferredTargets flags
404 deferredTargets.resetFlags();
405
406 order = targets.front().order;
407 readyTime = std::max(curTick(), targets.front().readyTime);
408
409 return true;
410 }
411
412
413 void
414 MSHR::handleFill(Packet *pkt, CacheBlk *blk)
415 {
416 if (!pkt->sharedAsserted()
417 && !(hasPostInvalidate() || hasPostDowngrade())
418 && deferredTargets.needsExclusive) {
419 // We got an exclusive response, but we have deferred targets
420 // which are waiting to request an exclusive copy (not because
421 // of a pending invalidate). This can happen if the original
422 // request was for a read-only (non-exclusive) block, but we
423 // got an exclusive copy anyway because of the E part of the
424 // MOESI/MESI protocol. Since we got the exclusive copy
425 // there's no need to defer the targets, so move them up to
426 // the regular target list.
427 assert(!targets.needsExclusive);
428 targets.needsExclusive = true;
429 // if any of the deferred targets were upper-level cache
430 // requests marked downstreamPending, need to clear that
431 assert(!downstreamPending); // not pending here anymore
432 deferredTargets.clearDownstreamPending();
433 // this clears out deferredTargets too
434 targets.splice(targets.end(), deferredTargets);
435 deferredTargets.resetFlags();
436 }
437 }
438
439
440 bool
441 MSHR::checkFunctional(PacketPtr pkt)
442 {
443 // For printing, we treat the MSHR as a whole as single entity.
444 // For other requests, we iterate over the individual targets
445 // since that's where the actual data lies.
446 if (pkt->isPrint()) {
447 pkt->checkFunctional(this, addr, isSecure, size, NULL);
448 return false;
449 } else {
450 return (targets.checkFunctional(pkt) ||
451 deferredTargets.checkFunctional(pkt));
452 }
453 }
454
455
// Dump this MSHR's address range, flags, and both target lists to the
// given stream, each line prefixed by 'prefix'. Debug/print-path only.
void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%x:%x](%s) %s %s %s state: %s %s %s %s %s\n",
             prefix, addr, addr+size-1,
             isSecure ? "s" : "ns",
             isForward ? "Forward" : "",
             isForwardNoResponse() ? "ForwNoResp" : "",
             needsExclusive() ? "Excl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             hasPostInvalidate() ? "PostInv" : "",
             hasPostDowngrade() ? "PostDowngr" : "");

    ccprintf(os, "%s  Targets:\n", prefix);
    targets.print(os, verbosity, prefix + "    ");
    // Only show the deferred section when it is non-empty.
    if (!deferredTargets.empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets.print(os, verbosity, prefix + "      ");
    }
}
478
479 std::string
480 MSHR::print() const
481 {
482 ostringstream str;
483 print(str);
484 return str.str();
485 }