More Changes, working towards cache.cc compiling. Headers cleaned up.
[gem5.git] / src / mem / cache / miss / miss_queue.cc
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Ron Dreslinski
 */

/**
 * @file
 * Miss and writeback queue definitions.
 */

#include "cpu/exec_context.hh"
#include "cpu/smt.hh" //for maxThreadsPerCPU
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/miss_queue.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"

using namespace std;

// simple constructor
/**
 * @todo Remove the +16 from the write buffer constructor once we handle
 * stalling on writebacks due to compression writes.
 */
MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
                     bool write_allocate, bool prefetch_miss)
    : mq(numMSHRs, 4), wb(write_buffers, numMSHRs+1000), numMSHR(numMSHRs),
      numTarget(numTargets), writeBuffers(write_buffers),
      writeAllocate(write_allocate), order(0), prefetchMiss(prefetch_miss)
{
    noTargetMSHR = NULL;
}

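/**
 * Register the miss queue and write buffer statistics under the given
 * name.
 */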
void
MissQueue::regStats(const string &name)
{
    using namespace Stats;

    writebacks
        .init(maxThreadsPerCPU)
        .name(name + ".writebacks")
        .desc("number of writebacks")
        .flags(total)
        ;

    // MSHR hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        mshr_hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_hits")
            .desc("number of " + cstr + " MSHR hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandMshrHits
        .name(name + ".demand_mshr_hits")
        .desc("number of demand (read+write) MSHR hits")
        .flags(total)
        ;
    demandMshrHits = mshr_hits[Read] + mshr_hits[Write];

    overallMshrHits
        .name(name + ".overall_mshr_hits")
        .desc("number of overall MSHR hits")
        .flags(total)
        ;
    overallMshrHits = demandMshrHits + mshr_hits[Soft_Prefetch] +
        mshr_hits[Hard_Prefetch];

    // MSHR miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        mshr_misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_misses")
            .desc("number of " + cstr + " MSHR misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMshrMisses
        .name(name + ".demand_mshr_misses")
        .desc("number of demand (read+write) MSHR misses")
        .flags(total)
        ;
    demandMshrMisses = mshr_misses[Read] + mshr_misses[Write];

    overallMshrMisses
        .name(name + ".overall_mshr_misses")
        .desc("number of overall MSHR misses")
        .flags(total)
        ;
    overallMshrMisses = demandMshrMisses + mshr_misses[Soft_Prefetch] +
        mshr_misses[Hard_Prefetch];

    // MSHR miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        mshr_miss_latency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_miss_latency")
            .desc("number of " + cstr + " MSHR miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMshrMissLatency
        .name(name + ".demand_mshr_miss_latency")
        .desc("number of demand (read+write) MSHR miss cycles")
        .flags(total)
        ;
    demandMshrMissLatency = mshr_miss_latency[Read] + mshr_miss_latency[Write];

    overallMshrMissLatency
        .name(name + ".overall_mshr_miss_latency")
        .desc("number of overall MSHR miss cycles")
        .flags(total)
        ;
    overallMshrMissLatency = demandMshrMissLatency +
        mshr_miss_latency[Soft_Prefetch] + mshr_miss_latency[Hard_Prefetch];

    // MSHR uncacheable statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        mshr_uncacheable[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_uncacheable")
            .desc("number of " + cstr + " MSHR uncacheable")
            .flags(total | nozero | nonan)
            ;
    }

    overallMshrUncacheable
        .name(name + ".overall_mshr_uncacheable_misses")
        .desc("number of overall MSHR uncacheable misses")
        .flags(total)
        ;
    overallMshrUncacheable = mshr_uncacheable[Read] + mshr_uncacheable[Write]
        + mshr_uncacheable[Soft_Prefetch] + mshr_uncacheable[Hard_Prefetch];

    // MSHR uncacheable latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        mshr_uncacheable_lat[access_idx]
            .init(maxThreadsPerCPU)
            .name(name + "." + cstr + "_mshr_uncacheable_latency")
            .desc("number of " + cstr + " MSHR uncacheable cycles")
            .flags(total | nozero | nonan)
            ;
    }

    overallMshrUncacheableLatency
        .name(name + ".overall_mshr_uncacheable_latency")
        .desc("number of overall MSHR uncacheable cycles")
        .flags(total)
        ;
    overallMshrUncacheableLatency = mshr_uncacheable_lat[Read]
        + mshr_uncacheable_lat[Write] + mshr_uncacheable_lat[Soft_Prefetch]
        + mshr_uncacheable_lat[Hard_Prefetch];

#if 0
    // MSHR access formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        mshrAccesses[access_idx]
            .name(name + "." + cstr + "_mshr_accesses")
            .desc("number of " + cstr + " mshr accesses(hits+misses)")
            .flags(total | nozero | nonan)
            ;
        mshrAccesses[access_idx] =
            mshr_hits[access_idx] + mshr_misses[access_idx]
            + mshr_uncacheable[access_idx];
    }

    demandMshrAccesses
        .name(name + ".demand_mshr_accesses")
        .desc("number of demand (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrAccesses = demandMshrHits + demandMshrMisses;

    overallMshrAccesses
        .name(name + ".overall_mshr_accesses")
        .desc("number of overall (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrAccesses = overallMshrHits + overallMshrMisses
        + overallMshrUncacheable;
#endif

    // MSHR miss rate formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        mshrMissRate[access_idx]
            .name(name + "." + cstr + "_mshr_miss_rate")
            .desc("mshr miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        mshrMissRate[access_idx] =
            mshr_misses[access_idx] / cache->accesses[access_idx];
    }

    demandMshrMissRate
        .name(name + ".demand_mshr_miss_rate")
        .desc("mshr miss rate for demand accesses")
        .flags(total)
        ;
    demandMshrMissRate = demandMshrMisses / cache->demandAccesses;

    overallMshrMissRate
        .name(name + ".overall_mshr_miss_rate")
        .desc("mshr miss rate for overall accesses")
        .flags(total)
        ;
    overallMshrMissRate = overallMshrMisses / cache->overallAccesses;

    // mshrMiss latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        avgMshrMissLatency[access_idx]
            .name(name + "." + cstr + "_avg_mshr_miss_latency")
            .desc("average " + cstr + " mshr miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMshrMissLatency[access_idx] =
            mshr_miss_latency[access_idx] / mshr_misses[access_idx];
    }

    demandAvgMshrMissLatency
        .name(name + ".demand_avg_mshr_miss_latency")
        .desc("average demand mshr miss latency")
        .flags(total)
        ;
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;

    overallAvgMshrMissLatency
        .name(name + ".overall_avg_mshr_miss_latency")
        .desc("average overall mshr miss latency")
        .flags(total)
        ;
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;

    // mshrUncacheable latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::CommandEnum)access_idx;
        const string &cstr = cmd.toString();

        avgMshrUncacheableLatency[access_idx]
            .name(name + "." + cstr + "_avg_mshr_uncacheable_latency")
            .desc("average " + cstr + " mshr uncacheable latency")
            .flags(total | nozero | nonan)
            ;

        avgMshrUncacheableLatency[access_idx] =
            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
    }

    overallAvgMshrUncacheableLatency
        .name(name + ".overall_avg_mshr_uncacheable_latency")
        .desc("average overall mshr uncacheable latency")
        .flags(total)
        ;
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;

    mshr_cap_events
        .init(maxThreadsPerCPU)
        .name(name + ".mshr_cap_events")
        .desc("number of times MSHR cap was activated")
        .flags(total)
        ;

    //software prefetching stats
    soft_prefetch_mshr_full
        .init(maxThreadsPerCPU)
        .name(name + ".soft_prefetch_mshr_full")
        .desc("number of mshr full events for SW prefetching instructions")
        .flags(total)
        ;

    mshr_no_allocate_misses
        .name(name + ".no_allocate_misses")
        .desc("number of misses that were no-allocate")
        ;
}

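/**
 * Set the parent cache and record its block size.
 */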
void
MissQueue::setCache(BaseCache *_cache)
{
    cache = _cache;
    blkSize = cache->getBlockSize();
}

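/**
 * Set the prefetcher to be consulted on misses.
 */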
void
MissQueue::setPrefetcher(BasePrefetcher *_prefetcher)
{
    prefetcher = _prefetcher;
}

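/**
 * Allocate an MSHR for the given miss, marking cacheable requests as
 * cache line fills. Blocks the cache if the MSHR queue becomes full and
 * requests the bus unless the miss is a hardware prefetch.
 */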
MSHR*
MissQueue::allocateMiss(Packet * &pkt, int size, Tick time)
{
    MSHR* mshr = mq.allocate(pkt, size);
    mshr->order = order++;
    if (!pkt->req->isUncacheable()) { //&& !pkt->isNoAllocate()
        // Mark this as a cache line fill
        mshr->pkt->flags |= CACHE_LINE_FILL;
    }
    if (mq.isFull()) {
        cache->setBlocked(Blocked_NoMSHRs);
    }
    if (pkt->cmd != Hard_Prefetch) {
        //If we need to request the bus (not on HW prefetch), do so
        cache->setMasterRequest(Request_MSHR, time);
    }
    return mshr;
}

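/**
 * Allocate a write buffer entry for the given write and copy the write
 * data into it. Blocks the cache if the write buffer becomes full.
 */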
MSHR*
MissQueue::allocateWrite(Packet * &pkt, int size, Tick time)
{
    MSHR* mshr = wb.allocate(pkt, pkt->size);
    mshr->order = order++;
    if (cache->doData()) {
        if (pkt->isCompressed()) {
            delete [] mshr->pkt->data;
            mshr->pkt->actualSize = pkt->actualSize;
            mshr->pkt->data = new uint8_t[pkt->actualSize];
            memcpy(mshr->pkt->data, pkt->data, pkt->actualSize);
        } else {
            memcpy(mshr->pkt->data, pkt->data, pkt->size);
        }
    }
    if (wb.isFull()) {
        cache->setBlocked(Blocked_NoWBBuffers);
    }

    cache->setMasterRequest(Request_WB, time);

    return mshr;
}

/**
 * @todo Remove SW prefetches on mshr hits.
 */
void
MissQueue::handleMiss(Packet * &pkt, int blkSize, Tick time)
{
//    if (!cache->isTopLevel())
    if (prefetchMiss) prefetcher->handleMiss(pkt, time);

    int size = blkSize;
    Addr blkAddr = pkt->paddr & ~(Addr)(blkSize-1);
    MSHR* mshr = NULL;
    if (!pkt->req->isUncacheable()) {
        mshr = mq.findMatch(blkAddr, pkt->req->asid);
        if (mshr) {
            //@todo remove hw_pf here
            mshr_hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
            if (mshr->getThreadNum() != pkt->req->getThreadNum()) {
                mshr->setThreadNum(-1);
            }
            mq.allocateTarget(mshr, pkt);
            if (mshr->pkt->isNoAllocate() && !pkt->isNoAllocate()) {
                //We are adding an allocate after a no-allocate
                mshr->pkt->flags &= ~NO_ALLOCATE;
            }
            if (mshr->getNumTargets() == numTarget) {
                noTargetMSHR = mshr;
                cache->setBlocked(Blocked_NoTargets);
                mq.moveToFront(mshr);
            }
            return;
        }
        if (pkt->isNoAllocate()) {
            //Count no-allocate requests differently
            mshr_no_allocate_misses++;
        }
        else {
            mshr_misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
        }
    } else {
        //Count uncacheable accesses
        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
        size = pkt->size;
    }
    if (pkt->cmd.isWrite() && (pkt->req->isUncacheable() || !writeAllocate ||
                               pkt->cmd.isNoResponse())) {
        /**
         * @todo Add write merging here.
         */
        mshr = allocateWrite(pkt, pkt->size, time);
        return;
    }

    mshr = allocateMiss(pkt, size, time);
}

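/**
 * Allocate an MSHR to fetch the block containing the given address on
 * behalf of the given target. No matching MSHR may already exist.
 */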
MSHR*
MissQueue::fetchBlock(Addr addr, int asid, int blk_size, Tick time,
                      Packet * &target)
{
    Addr blkAddr = addr & ~(Addr)(blk_size - 1);
    assert(mq.findMatch(addr, asid) == NULL);
    MSHR *mshr = mq.allocateFetch(blkAddr, asid, blk_size, target);
    mshr->order = order++;
    mshr->pkt->flags |= CACHE_LINE_FILL;
    if (mq.isFull()) {
        cache->setBlocked(Blocked_NoMSHRs);
    }
    cache->setMasterRequest(Request_MSHR, time);
    return mshr;
}

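/**
 * Select the next request to send on the bus. Prefers a writeback when
 * the write buffer is full or no miss is ready, unless an earlier
 * conflicting miss is pending; if neither queue has a request and an
 * MSHR is free, a prefetch may be issued instead.
 */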
Packet *
MissQueue::getPacket()
{
    Packet * pkt = mq.getReq();
    if (((wb.isFull() && wb.inServiceMSHRs == 0) || !pkt ||
         pkt->time > curTick) && wb.havePending()) {
        pkt = wb.getReq();
        // Need to search for earlier miss.
        MSHR *mshr = mq.findPending(pkt);
        if (mshr && mshr->order < pkt->senderState->order) {
            // Service misses in order until conflict is cleared.
            return mq.getReq();
        }
    }
    if (pkt) {
        MSHR* mshr = wb.findPending(pkt);
        if (mshr /*&& mshr->order < pkt->senderState->order*/) {
            // The only way this happens is if we are doing a write and
            // we didn't have permissions, then subsequently saw a
            // writeback (owned block got evicted). We need to perform
            // the writeback first to preserve the dirty data, then we
            // can issue the write.
            return wb.getReq();
        }
    }
    else if (!mq.isFull()){
        //If we have a miss queue slot, we can try a prefetch
        pkt = prefetcher->getPacket();
        if (pkt) {
            //Update statistic on number of prefetches issued
            //(hwpf_mshr_misses)
            mshr_misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
            //It will request the bus for the future, but should clear
            //that immediately
            allocateMiss(pkt, pkt->size, curTick);
            pkt = mq.getReq();
            assert(pkt); //We should get back a req b/c we just put one in
        }
    }
    return pkt;
}

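/**
 * Save the packet's original command in its MSHR and substitute the
 * given bus command for cache fills and no-allocate requests.
 */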
void
MissQueue::setBusCmd(Packet * &pkt, Packet::Command cmd)
{
    assert(pkt->senderState != 0);
    MSHR * mshr = pkt->senderState;
    mshr->originalCmd = pkt->cmd;
    if (pkt->isCacheFill() || pkt->isNoAllocate())
        pkt->cmd = cmd;
}

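/**
 * Restore the command saved by setBusCmd().
 */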
void
MissQueue::restoreOrigCmd(Packet * &pkt)
{
    pkt->cmd = pkt->senderState->originalCmd;
}

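/**
 * Mark the MSHR attached to the given packet as in service in the
 * appropriate queue, clearing the corresponding bus request when nothing
 * is left pending and unblocking the cache if an entry was freed.
 */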
void
MissQueue::markInService(Packet * &pkt)
{
    assert(pkt->senderState != 0);
    bool unblock = false;
    BlockedCause cause = NUM_BLOCKED_CAUSES;

    /**
     * @todo Should include MSHRQueue pointer in MSHR to select the correct
     * one.
     */
    if ((!pkt->isCacheFill() && pkt->cmd.isWrite()) || pkt->cmd == Copy) {
        // Forwarding a write or a writeback; no need to change
        // the command
        unblock = wb.isFull();
        wb.markInService(pkt->senderState);
        if (!wb.havePending()) {
            cache->clearMasterRequest(Request_WB);
        }
        if (unblock) {
            // Do we really unblock?
            unblock = !wb.isFull();
            cause = Blocked_NoWBBuffers;
        }
    } else {
        unblock = mq.isFull();
        mq.markInService(pkt->senderState);
        if (!mq.havePending()) {
            cache->clearMasterRequest(Request_MSHR);
        }
        if (pkt->senderState->originalCmd == Hard_Prefetch) {
            DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
                    cache->name());
            //Also clear pending if need be
            if (!prefetcher->havePending()) {
                cache->clearMasterRequest(Request_PF);
            }
        }
        if (unblock) {
            unblock = !mq.isFull();
            cause = Blocked_NoMSHRs;
        }
    }
    if (unblock) {
        cache->clearBlocked(cause);
    }
}

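/**
 * Handle a response from the bus: record miss or uncacheable latency,
 * respond to or resend any remaining targets, and free the MSHR or
 * write buffer entry, unblocking the cache if space was freed.
 */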
void
MissQueue::handleResponse(Packet * &pkt, Tick time)
{
    MSHR* mshr = pkt->senderState;
    if (pkt->senderState->originalCmd == Hard_Prefetch) {
        DPRINTF(HWPrefetch, "%s:Handling the response to a HW_PF\n",
                cache->name());
    }
#ifndef NDEBUG
    int num_targets = mshr->getNumTargets();
#endif

    bool unblock = false;
    bool unblock_target = false;
    BlockedCause cause = NUM_BLOCKED_CAUSES;

    if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
        mshr_miss_latency[mshr->originalCmd][pkt->req->getThreadNum()] +=
            curTick - pkt->time;
        // targets were handled in the cache tags
        if (mshr == noTargetMSHR) {
            // we always clear at least one target
            unblock_target = true;
            cause = Blocked_NoTargets;
            noTargetMSHR = NULL;
        }

        if (mshr->hasTargets()) {
            // Didn't satisfy all the targets, need to resend
            Packet::Command cmd = mshr->getTarget()->cmd;
            mq.markPending(mshr, cmd);
            mshr->order = order++;
            cache->setMasterRequest(Request_MSHR, time);
        }
        else {
            unblock = mq.isFull();
            mq.deallocate(mshr);
            if (unblock) {
                unblock = !mq.isFull();
                cause = Blocked_NoMSHRs;
            }
        }
    } else {
        if (pkt->req->isUncacheable()) {
            mshr_uncacheable_lat[pkt->cmd][pkt->req->getThreadNum()] +=
                curTick - pkt->time;
        }
        if (mshr->hasTargets() && pkt->req->isUncacheable()) {
            // Should only have 1 target if we had any
            assert(num_targets == 1);
            Packet * target = mshr->getTarget();
            mshr->popTarget();
            if (cache->doData() && pkt->cmd.isRead()) {
                memcpy(target->data, pkt->data, target->size);
            }
            cache->respond(target, time);
            assert(!mshr->hasTargets());
        }
        else if (mshr->hasTargets()) {
            //Must be a no_allocate with possibly more than one target
            assert(mshr->pkt->isNoAllocate());
            while (mshr->hasTargets()) {
                Packet * target = mshr->getTarget();
                mshr->popTarget();
                if (cache->doData() && pkt->cmd.isRead()) {
                    memcpy(target->data, pkt->data, target->size);
                }
                cache->respond(target, time);
            }
        }

        if (pkt->cmd.isWrite()) {
            // If the write buffer is full, we might unblock now
            unblock = wb.isFull();
            wb.deallocate(mshr);
            if (unblock) {
                // Did we really unblock?
                unblock = !wb.isFull();
                cause = Blocked_NoWBBuffers;
            }
        } else {
            unblock = mq.isFull();
            mq.deallocate(mshr);
            if (unblock) {
                unblock = !mq.isFull();
                cause = Blocked_NoMSHRs;
            }
        }
    }
    if (unblock || unblock_target) {
        cache->clearBlocked(cause);
    }
}

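/**
 * Squash all outstanding misses for the given thread, unblocking the
 * cache if the squash frees the blocked resource.
 */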
void
MissQueue::squash(int threadNum)
{
    bool unblock = false;
    BlockedCause cause = NUM_BLOCKED_CAUSES;

    if (noTargetMSHR && noTargetMSHR->getThreadNum() == threadNum) {
        noTargetMSHR = NULL;
        unblock = true;
        cause = Blocked_NoTargets;
    }
    if (mq.isFull()) {
        unblock = true;
        cause = Blocked_NoMSHRs;
    }
    mq.squash(threadNum);
    if (!mq.havePending()) {
        cache->clearMasterRequest(Request_MSHR);
    }
    if (unblock && !mq.isFull()) {
        cache->clearBlocked(cause);
    }
}

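/**
 * Find the MSHR, if any, matching the given address and ASID.
 */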
MSHR*
MissQueue::findMSHR(Addr addr, int asid) const
{
    return mq.findMatch(addr, asid);
}

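/**
 * Find all outstanding writes matching the given address and ASID.
 * @return True if any matches were found.
 */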
bool
MissQueue::findWrites(Addr addr, int asid, vector<MSHR*> &writes) const
{
    return wb.findMatches(addr, asid, writes);
}

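/**
 * Build a writeback request for the given block and allocate it a write
 * buffer entry.
 */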
void
MissQueue::doWriteback(Addr addr, int asid,
                       int size, uint8_t *data, bool compressed)
{
    // Generate request
    Packet * pkt = buildWritebackReq(addr, asid, size, data,
                                     compressed);

    writebacks[pkt->req->getThreadNum()]++;

    allocateWrite(pkt, 0, curTick);
}

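/**
 * Allocate a write buffer entry for an existing writeback packet.
 */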
void
MissQueue::doWriteback(Packet * &pkt)
{
    writebacks[pkt->req->getThreadNum()]++;
    allocateWrite(pkt, 0, curTick);
}

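/**
 * Allocate an MSHR to hold a list of targets for the given block,
 * marking it as a cache line fill and blocking the cache if the MSHR
 * queue becomes full.
 */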
MSHR*
MissQueue::allocateTargetList(Addr addr, int asid)
{
    MSHR* mshr = mq.allocateTargetList(addr, asid, blkSize);
    mshr->pkt->flags |= CACHE_LINE_FILL;
    if (mq.isFull()) {
        cache->setBlocked(Blocked_NoMSHRs);
    }
    return mshr;
}

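/**
 * Return true if the miss queue, write buffer, or prefetcher has a
 * request pending.
 */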
bool
MissQueue::havePending()
{
    return mq.havePending() || wb.havePending() || prefetcher->havePending();
}