cache: fail store conditionals when upgrade loses race
[gem5.git] / src / mem / cache / mshr.cc
1 /*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * Copyright (c) 2010 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * Authors: Erik Hallnor
30 * Dave Greene
31 */
32
33 /**
34 * @file
35 * Miss Status and Handling Register (MSHR) definitions.
36 */
37
38 #include <algorithm>
39 #include <cassert>
40 #include <string>
41 #include <vector>
42
43 #include "base/misc.hh"
44 #include "base/types.hh"
45 #include "mem/cache/cache.hh"
46 #include "mem/cache/mshr.hh"
47 #include "sim/core.hh"
48
49 using namespace std;
50
MSHR::MSHR()
{
    // Start out idle: the entry is not tracking any miss until
    // allocate() is called.
    inService = false;
    ntargets = 0;
    threadNum = InvalidThreadID;
    // Both target lists are heap-allocated once and then reused
    // across allocate()/deallocate() cycles (see promoteDeferredTargets,
    // which swaps the two pointers).
    targets = new TargetList();
    deferredTargets = new TargetList();
}
59
60
// A fresh target list needs no exclusive copy and holds no upgrade
// requests; both summary flags are recomputed as targets are added.
MSHR::TargetList::TargetList()
    : needsExclusive(false), hasUpgrade(false)
{}
64
65
66 inline void
67 MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
68 Counter order, Target::Source source, bool markPending)
69 {
70 if (source != Target::FromSnoop) {
71 if (pkt->needsExclusive()) {
72 needsExclusive = true;
73 }
74
75 if (pkt->isUpgrade()) {
76 hasUpgrade = true;
77 }
78 }
79
80 if (markPending) {
81 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
82 if (mshr != NULL) {
83 assert(!mshr->downstreamPending);
84 mshr->downstreamPending = true;
85 }
86 }
87
88 push_back(Target(pkt, readyTime, order, source, markPending));
89 }
90
91
92 void
93 MSHR::TargetList::replaceUpgrades()
94 {
95 if (!hasUpgrade)
96 return;
97
98 Iterator end_i = end();
99 for (Iterator i = begin(); i != end_i; ++i) {
100 if (i->pkt->cmd == MemCmd::UpgradeReq) {
101 i->pkt->cmd = MemCmd::ReadExReq;
102 DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
103 } else if (i->pkt->cmd == MemCmd::SCUpgradeReq) {
104 i->pkt->cmd = MemCmd::SCUpgradeFailReq;
105 DPRINTF(Cache, "Replacing SCUpgradeReq with SCUpgradeFailReq\n");
106 }
107 }
108
109 hasUpgrade = false;
110 }
111
112
113 void
114 MSHR::TargetList::clearDownstreamPending()
115 {
116 Iterator end_i = end();
117 for (Iterator i = begin(); i != end_i; ++i) {
118 if (i->markedPending) {
119 MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
120 if (mshr != NULL) {
121 mshr->clearDownstreamPending();
122 }
123 }
124 }
125 }
126
127
128 bool
129 MSHR::TargetList::checkFunctional(PacketPtr pkt)
130 {
131 Iterator end_i = end();
132 for (Iterator i = begin(); i != end_i; ++i) {
133 if (pkt->checkFunctional(i->pkt)) {
134 return true;
135 }
136 }
137
138 return false;
139 }
140
141
142 void
143 MSHR::TargetList::
144 print(std::ostream &os, int verbosity, const std::string &prefix) const
145 {
146 ConstIterator end_i = end();
147 for (ConstIterator i = begin(); i != end_i; ++i) {
148 const char *s;
149 switch (i->source) {
150 case Target::FromCPU: s = "FromCPU";
151 case Target::FromSnoop: s = "FromSnoop";
152 case Target::FromPrefetcher: s = "FromPrefetcher";
153 default: s = "";
154 }
155 ccprintf(os, "%s%s: ", prefix, s);
156 i->pkt->print(os, verbosity, "");
157 }
158 }
159
160
// Initialize this MSHR for a new miss on [_addr, _addr+_size) with
// `target` as its first target.  `whenReady` is when the request may
// issue; `_order` is the global allocation order used for scheduling.
void
MSHR::allocate(Addr _addr, int _size, PacketPtr target,
               Tick whenReady, Counter _order)
{
    addr = _addr;
    size = _size;
    readyTime = whenReady;
    order = _order;
    assert(target);
    isForward = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    threadNum = 0;
    ntargets = 1;
    // both lists must have been left clean by the prior deallocate()
    assert(targets->isReset());
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set source according to request here
    Target::Source source = (target->cmd == MemCmd::HardPFReq) ?
        Target::FromPrefetcher : Target::FromCPU;
    // markPending=true: the upstream MSHR (if any) waits on our response
    targets->add(target, whenReady, _order, source, true);
    assert(deferredTargets->isReset());
    pendingInvalidate = false;
    pendingShared = false;
    data = NULL;
}
187
188
// Called when a downstream level commits to producing our response:
// this MSHR no longer has a response pending below it.
void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets->clearDownstreamPending();
}
198
199 bool
200 MSHR::markInService()
201 {
202 assert(!inService);
203 if (isForwardNoResponse()) {
204 // we just forwarded the request packet & don't expect a
205 // response, so get rid of it
206 assert(getNumTargets() == 1);
207 popTarget();
208 return true;
209 }
210 inService = true;
211 if (!downstreamPending) {
212 // let upstream caches know that the request has made it to a
213 // level where it's going to get a response
214 targets->clearDownstreamPending();
215 }
216 return false;
217 }
218
219
220 void
221 MSHR::deallocate()
222 {
223 assert(targets->empty());
224 targets->resetFlags();
225 assert(deferredTargets->isReset());
226 assert(ntargets == 0);
227 inService = false;
228 //allocIter = NULL;
229 //readyIter = NULL;
230 }
231
232 /*
233 * Adds a target to an MSHR
234 */
235 void
236 MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
237 {
238 // if there's a request already in service for this MSHR, we will
239 // have to defer the new target until after the response if any of
240 // the following are true:
241 // - there are other targets already deferred
242 // - there's a pending invalidate to be applied after the response
243 // comes back (but before this target is processed)
244 // - the outstanding request is for a non-exclusive block and this
245 // target requires an exclusive block
246
247 // assume we'd never issue a prefetch when we've got an
248 // outstanding miss
249 assert(pkt->cmd != MemCmd::HardPFReq);
250
251 if (inService &&
252 (!deferredTargets->empty() || pendingInvalidate ||
253 (!targets->needsExclusive && pkt->needsExclusive()))) {
254 // need to put on deferred list
255 deferredTargets->add(pkt, whenReady, _order, Target::FromCPU, true);
256 } else {
257 // No request outstanding, or still OK to append to
258 // outstanding request: append to regular target list. Only
259 // mark pending if current request hasn't been issued yet
260 // (isn't in service).
261 targets->add(pkt, whenReady, _order, Target::FromCPU, !inService);
262 }
263
264 ++ntargets;
265 }
266
// Apply an incoming snoop to this MSHR.  Returns false if the snoop
// logically precedes our request (cache must handle it normally),
// true if this MSHR has absorbed the snoop's effect.
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsExclusive()) {
            targets->replaceUpgrades();
            deferredTargets->replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.

    if (pkt->needsExclusive()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets->replaceUpgrades();
    }

    if (pendingInvalidate) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (targets->needsExclusive || pkt->needsExclusive()) {
        // actual target device (typ. PhysicalMemory) will delete the
        // packet on reception, so we need to save a copy here
        PacketPtr cp_pkt = new Packet(pkt, true);
        // mark the copy pending only while our exclusive response is
        // still buffered somewhere downstream
        targets->add(cp_pkt, curTick, _order, Target::FromSnoop,
                     downstreamPending && targets->needsExclusive);
        ++ntargets;

        if (targets->needsExclusive) {
            // We're awaiting an exclusive copy, so ownership is pending.
            // It's up to us to respond once the data arrives.
            pkt->assertMemInhibit();
            pkt->setSupplyExclusive();
        } else {
            // Someone else may respond before we get around to
            // processing this snoop, which means the copied request
            // pointer will no longer be valid
            cp_pkt->req = NULL;
        }

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            pendingInvalidate = true;
        }
    } else {
        // Read to a read: no conflict, so no need to record as
        // target, but make sure neither reader thinks he's getting an
        // exclusive copy
        pendingShared = true;
        pkt->assertShared();
    }

    return true;
}
342
343
344 bool
345 MSHR::promoteDeferredTargets()
346 {
347 assert(targets->empty());
348 if (deferredTargets->empty()) {
349 return false;
350 }
351
352 // swap targets & deferredTargets lists
353 TargetList *tmp = targets;
354 targets = deferredTargets;
355 deferredTargets = tmp;
356
357 assert(targets->size() == ntargets);
358
359 // clear deferredTargets flags
360 deferredTargets->resetFlags();
361
362 pendingInvalidate = false;
363 pendingShared = false;
364 order = targets->front().order;
365 readyTime = std::max(curTick, targets->front().readyTime);
366
367 return true;
368 }
369
370
// Adjust MSHR/packet state when the response for this miss arrives,
// before the targets are serviced.
void
MSHR::handleFill(Packet *pkt, CacheBlk *blk)
{
    if (pendingShared) {
        // we snooped another read while this read was in
        // service... assert shared line on its behalf
        pkt->assertShared();
    }

    if (!pkt->sharedAsserted() && !pendingInvalidate
        && deferredTargets->needsExclusive) {
        // We got an exclusive response, but we have deferred targets
        // which are waiting to request an exclusive copy (not because
        // of a pending invalidate).  This can happen if the original
        // request was for a read-only (non-exclusive) block, but we
        // got an exclusive copy anyway because of the E part of the
        // MOESI/MESI protocol.  Since we got the exclusive copy
        // there's no need to defer the targets, so move them up to
        // the regular target list.
        assert(!targets->needsExclusive);
        targets->needsExclusive = true;
        // if any of the deferred targets were upper-level cache
        // requests marked downstreamPending, need to clear that
        assert(!downstreamPending); // not pending here anymore
        deferredTargets->clearDownstreamPending();
        // this clears out deferredTargets too
        targets->splice(targets->end(), *deferredTargets);
        deferredTargets->resetFlags();
    }
}
401
402
403 bool
404 MSHR::checkFunctional(PacketPtr pkt)
405 {
406 // For printing, we treat the MSHR as a whole as single entity.
407 // For other requests, we iterate over the individual targets
408 // since that's where the actual data lies.
409 if (pkt->isPrint()) {
410 pkt->checkFunctional(this, addr, size, NULL);
411 return false;
412 } else {
413 return (targets->checkFunctional(pkt) ||
414 deferredTargets->checkFunctional(pkt));
415 }
416 }
417
418
// Dump this MSHR's address range, state flags, and both target lists.
void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
             prefix, addr, addr+size-1,
             isForward ? "Forward" : "",
             isForwardNoResponse() ? "ForwNoResp" : "",
             needsExclusive() ? "Excl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             pendingInvalidate ? "PendInv" : "",
             pendingShared ? "PendShared" : "");

    ccprintf(os, "%s  Targets:\n", prefix);
    targets->print(os, verbosity, prefix + "    ");
    // only mention the deferred list when it has something in it
    if (!deferredTargets->empty()) {
        ccprintf(os, "%s  Deferred Targets:\n", prefix);
        deferredTargets->print(os, verbosity, prefix + "      ");
    }
}
440
441 MSHR::~MSHR()
442 {
443 }