/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 */

/**
 * @file
 * Miss Status and Handling Register (MSHR) definitions.
 */

#include <assert.h>
#include <string>
#include <vector>
#include <algorithm>

#include "mem/cache/mshr.hh"
#include "sim/core.hh" // for curTick
#include "sim/host.hh"
#include "base/misc.hh"
#include "mem/cache/cache.hh"

using namespace std;

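/*
 * Default constructor: the MSHR starts out idle, with no targets.  The
 * two target lists are created once here and reused across
 * allocate()/deallocate() cycles.
 */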
MSHR::MSHR()
{
    inService = false;
    ntargets = 0;
    threadNum = -1;
    targets = new TargetList();
    deferredTargets = new TargetList();
}


MSHR::TargetList::TargetList()
    : needsExclusive(false), hasUpgrade(false)
{}


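/*
 * Append a new target to this list.  CPU-side requests update the
 * needsExclusive and hasUpgrade flags; if markPending is set and the
 * packet carries an upstream MSHR in its senderState, that MSHR is
 * flagged as downstreamPending.
 */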
inline void
MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
                      Counter order, bool cpuSide, bool markPending)
{
    if (cpuSide) {
        if (pkt->needsExclusive()) {
            needsExclusive = true;
        }

        if (pkt->cmd == MemCmd::UpgradeReq) {
            hasUpgrade = true;
        }
    }

    if (markPending) {
        MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
        if (mshr != NULL) {
            assert(!mshr->downstreamPending);
            mshr->downstreamPending = true;
        }
    }

    push_back(Target(pkt, readyTime, order, cpuSide, markPending));
}


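/*
 * Convert any queued UpgradeReq targets into ReadExReq targets.  Used
 * when an invalidating snoop means the requester's copy of the block
 * is no longer valid, so a plain upgrade would not suffice.
 */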
void
MSHR::TargetList::replaceUpgrades()
{
    if (!hasUpgrade)
        return;

    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (i->pkt->cmd == MemCmd::UpgradeReq) {
            i->pkt->cmd = MemCmd::ReadExReq;
            DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
        }
    }

    hasUpgrade = false;
}


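/*
 * Clear the downstreamPending flag on the MSHR of every target that
 * was marked pending when it was added to this list.
 */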
void
MSHR::TargetList::clearDownstreamPending()
{
    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (i->markedPending) {
            MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
            if (mshr != NULL) {
                mshr->clearDownstreamPending();
            }
        }
    }
}


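/*
 * Check a functional access against the packets queued in this list.
 * Returns true if the access was satisfied by one of the targets.
 */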
bool
MSHR::TargetList::checkFunctional(PacketPtr pkt)
{
    Iterator end_i = end();
    for (Iterator i = begin(); i != end_i; ++i) {
        if (pkt->checkFunctional(i->pkt)) {
            return true;
        }
    }

    return false;
}


void
MSHR::TargetList::
print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ConstIterator end_i = end();
    for (ConstIterator i = begin(); i != end_i; ++i) {
        ccprintf(os, "%s%s: ", prefix, i->isCpuSide() ? "cpu" : "mem");
        i->pkt->print(os, verbosity, "");
    }
}


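/*
 * Allocate this MSHR for a new outstanding request covering
 * [addr, addr + size), with the given packet as the initial cpu-side
 * target.
 */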
void
MSHR::allocate(Addr _addr, int _size, PacketPtr target,
               Tick whenReady, Counter _order)
{
    addr = _addr;
    size = _size;
    readyTime = whenReady;
    order = _order;
    assert(target);
    isCacheFill = false;
    _isUncacheable = target->req->isUncacheable();
    inService = false;
    downstreamPending = false;
    threadNum = 0;
    ntargets = 1;
    // Don't know of a case where we would allocate a new MSHR for a
    // snoop (mem-side request), so set cpuSide to true here.
    assert(targets->isReset());
    targets->add(target, whenReady, _order, true, true);
    assert(deferredTargets->isReset());
    pendingInvalidate = false;
    pendingShared = false;
    data = NULL;
}


void
MSHR::clearDownstreamPending()
{
    assert(downstreamPending);
    downstreamPending = false;
    // recursively clear flag on any MSHRs we will be forwarding
    // responses to
    targets->clearDownstreamPending();
}

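/*
 * Mark this MSHR as in service once its request has been sent
 * downstream.  Returns true if the MSHR can be freed immediately
 * because the request was simply forwarded and no response is
 * expected.
 */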
bool
MSHR::markInService()
{
    assert(!inService);
    if (isSimpleForward()) {
        // we just forwarded the request packet & don't expect a
        // response, so get rid of it
        assert(getNumTargets() == 1);
        popTarget();
        return true;
    }
    inService = true;
    if (!downstreamPending) {
        // let upstream caches know that the request has made it to a
        // level where it's going to get a response
        targets->clearDownstreamPending();
    }
    return false;
}


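/*
 * Deallocate this MSHR.  All targets must already have been drained;
 * this just resets the flags so the entry can be reallocated.
 */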
void
MSHR::deallocate()
{
    assert(targets->empty());
    targets->resetFlags();
    assert(deferredTargets->isReset());
    assert(ntargets == 0);
    inService = false;
    //allocIter = NULL;
    //readyIter = NULL;
}

/*
 * Adds a target to an MSHR
 */
void
MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
{
    // if there's a request already in service for this MSHR, we will
    // have to defer the new target until after the response if any of
    // the following are true:
    // - there are other targets already deferred
    // - there's a pending invalidate to be applied after the response
    //   comes back (but before this target is processed)
    // - the outstanding request is for a non-exclusive block and this
    //   target requires an exclusive block
    if (inService &&
        (!deferredTargets->empty() || pendingInvalidate ||
         (!targets->needsExclusive && pkt->needsExclusive()))) {
        // need to put on deferred list
        deferredTargets->add(pkt, whenReady, _order, true, true);
    } else {
        // No request outstanding, or still OK to append to
        // outstanding request: append to regular target list.  Only
        // mark pending if current request hasn't been issued yet
        // (isn't in service).
        targets->add(pkt, whenReady, _order, true, !inService);
    }

    ++ntargets;
}

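/*
 * Handle a snoop that hits this MSHR's block address.  Returns false
 * if the snoop logically precedes this MSHR's request and must be
 * handled by the cache in the normal way; returns true if the MSHR
 * has handled the snoop itself, recording it as a target and/or
 * updating its pending invalidate/shared state.
 */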
bool
MSHR::handleSnoop(PacketPtr pkt, Counter _order)
{
    if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
        // Request has not been issued yet, or it's been issued
        // locally but is buffered unissued at some downstream cache
        // which is forwarding us this snoop.  Either way, the packet
        // we're snooping logically precedes this MSHR's request, so
        // the snoop has no impact on the MSHR, but must be processed
        // in the standard way by the cache.  The only exception is
        // that if we're an L2+ cache buffering an UpgradeReq from a
        // higher-level cache, and the snoop is invalidating, then our
        // buffered upgrades must be converted to read exclusives,
        // since the upper-level cache no longer has a valid copy.
        // That is, even though the upper-level cache got out on its
        // local bus first, some other invalidating transaction
        // reached the global bus before the upgrade did.
        if (pkt->needsExclusive()) {
            targets->replaceUpgrades();
            deferredTargets->replaceUpgrades();
        }

        return false;
    }

    // From here on down, the request issued by this MSHR logically
    // precedes the request we're snooping.

    if (pkt->needsExclusive()) {
        // snooped request still precedes the re-request we'll have to
        // issue for deferred targets, if any...
        deferredTargets->replaceUpgrades();
    }

    if (pendingInvalidate) {
        // a prior snoop has already appended an invalidation, so
        // logically we don't have the block anymore; no need for
        // further snooping.
        return true;
    }

    if (targets->needsExclusive || pkt->needsExclusive()) {
        // actual target device (typ. PhysicalMemory) will delete the
        // packet on reception, so we need to save a copy here
        PacketPtr cp_pkt = new Packet(pkt, true);
        targets->add(cp_pkt, curTick, _order, false,
                     downstreamPending && targets->needsExclusive);
        ++ntargets;

        if (targets->needsExclusive) {
            // We're awaiting an exclusive copy, so ownership is pending.
            // It's up to us to respond once the data arrives.
            pkt->assertMemInhibit();
            pkt->setSupplyExclusive();
        } else {
            // Someone else may respond before we get around to
            // processing this snoop, which means the copied request
            // pointer will no longer be valid
            cp_pkt->req = NULL;
        }

        if (pkt->needsExclusive()) {
            // This transaction will take away our pending copy
            pendingInvalidate = true;
        }
    } else {
        // Read to a read: no conflict, so no need to record as
        // target, but make sure neither reader thinks he's getting an
        // exclusive copy
        pendingShared = true;
        pkt->assertShared();
    }

    return true;
}


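/*
 * Promote deferred targets to the active target list once the current
 * set of targets has been serviced.  Returns false if there are no
 * deferred targets.
 */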
bool
MSHR::promoteDeferredTargets()
{
    assert(targets->empty());
    if (deferredTargets->empty()) {
        return false;
    }

    // swap targets & deferredTargets lists
    TargetList *tmp = targets;
    targets = deferredTargets;
    deferredTargets = tmp;

    assert(targets->size() == ntargets);

    // clear deferredTargets flags
    deferredTargets->resetFlags();

    pendingInvalidate = false;
    pendingShared = false;
    order = targets->front().order;
    readyTime = std::max(curTick, targets->front().readyTime);

    return true;
}


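/*
 * Called when the response for this MSHR's outstanding request comes
 * back.  Propagates a pending shared assertion to the response packet
 * and, if we received an exclusive copy with no invalidate pending,
 * promotes deferred exclusive targets so they can be satisfied by
 * this fill.
 */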
void
MSHR::handleFill(Packet *pkt, CacheBlk *blk)
{
    if (pendingShared) {
        // we snooped another read while this read was in
        // service... assert shared line on its behalf
        pkt->assertShared();
    }

    if (!pkt->sharedAsserted() && !pendingInvalidate
        && deferredTargets->needsExclusive) {
        // We got an exclusive response, but we have deferred targets
        // which are waiting to request an exclusive copy (not because
        // of a pending invalidate).  This can happen if the original
        // request was for a read-only (non-exclusive) block, but we
        // got an exclusive copy anyway because of the E part of the
        // MOESI/MESI protocol.  Since we got the exclusive copy
        // there's no need to defer the targets, so move them up to
        // the regular target list.
        assert(!targets->needsExclusive);
        targets->needsExclusive = true;
        // if any of the deferred targets were upper-level cache
        // requests marked downstreamPending, need to clear that
        assert(!downstreamPending); // not pending here anymore
        deferredTargets->clearDownstreamPending();
        // this clears out deferredTargets too
        targets->splice(targets->end(), *deferredTargets);
        deferredTargets->resetFlags();
    }
}


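/*
 * Service a functional access against this MSHR: print requests treat
 * the MSHR as a single entity, while all other accesses are checked
 * against the individual target packets, where the data actually
 * lives.
 */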
bool
MSHR::checkFunctional(PacketPtr pkt)
{
    // For printing, we treat the MSHR as a whole as a single entity.
    // For other requests, we iterate over the individual targets
    // since that's where the actual data lies.
    if (pkt->isPrint()) {
        pkt->checkFunctional(this, addr, size, NULL);
        return false;
    } else {
        return (targets->checkFunctional(pkt) ||
                deferredTargets->checkFunctional(pkt));
    }
}


void
MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
{
    ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
             prefix, addr, addr+size-1,
             isCacheFill ? "Fill" : "",
             needsExclusive() ? "Excl" : "",
             _isUncacheable ? "Unc" : "",
             inService ? "InSvc" : "",
             downstreamPending ? "DwnPend" : "",
             pendingInvalidate ? "PendInv" : "",
             pendingShared ? "PendShared" : "");

    ccprintf(os, "%s Targets:\n", prefix);
    targets->print(os, verbosity, prefix + " ");
    if (!deferredTargets->empty()) {
        ccprintf(os, "%s Deferred Targets:\n", prefix);
        deferredTargets->print(os, verbosity, prefix + " ");
    }
}

MSHR::~MSHR()
{
}