2 * Copyright (c) 2003-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Erik Hallnor
34 * Miss and writeback queue definitions.
37 #include "cpu/exec_context.hh"
38 #include "cpu/smt.hh" //for maxThreadsPerCPU
39 #include "mem/cache/base_cache.hh"
40 #include "mem/cache/miss/miss_queue.hh"
41 #include "mem/cache/prefetch/base_prefetcher.hh"
47 * @todo Remove the +16 from the write buffer constructor once we handle
48 * stalling on writebacks due to compression writes.
// Construct the miss queue (MSHR queue + writeback buffer).
// NOTE(review): this view of the file is garbled by extraction — original
// line numbers are fused into the text and some lines (e.g. the constructor
// body) are missing; comments below annotate only the visible initializer list.
//
// Parameters:
//   numMSHRs      - number of MSHR entries in the miss queue
//   numTargets    - maximum targets per MSHR before the cache blocks
//   write_buffers - number of writeback-buffer entries
//   write_allocate- whether writes allocate a line on miss
//   prefetch_miss - whether misses are forwarded to the prefetcher
//
// mq(numMSHRs, 4): the second argument is presumably a reserve count for the
// MSHR queue — TODO confirm against MSHRQueue's constructor.
// wb(write_buffers, numMSHRs+1000): the large +1000 reserve works around
// stalling on compression writebacks (see the @todo above this constructor).
50 MissQueue::MissQueue(int numMSHRs
, int numTargets
, int write_buffers
,
51 bool write_allocate
, bool prefetch_miss
)
52 : mq(numMSHRs
, 4), wb(write_buffers
,numMSHRs
+1000), numMSHR(numMSHRs
),
53 numTarget(numTargets
), writeBuffers(write_buffers
),
54 writeAllocate(write_allocate
), order(0), prefetchMiss(prefetch_miss
)
// Register all miss-queue statistics under the given name prefix.
// For every memory-command index this builds per-thread vectors for MSHR
// hits, misses, miss latency, uncacheable accesses and their latency, plus
// demand (read+write) and overall aggregates, rate formulas (misses /
// accesses) and average-latency formulas derived from them.
// NOTE(review): several lines of this function are missing from this view
// (stat declarations, .flags() calls, closing braces); comments annotate
// only what is visible.
// NOTE(review): the first loop casts access_idx with (Packet::Command)
// while every later loop uses (Packet::CommandEnum) — presumably these
// should agree; confirm against the Packet definition.
60 MissQueue::regStats(const string
&name
)
62 using namespace Stats
;
// Per-thread writeback counter.
65 .init(maxThreadsPerCPU
)
66 .name(name
+ ".writebacks")
67 .desc("number of writebacks")
71 // MSHR hit statistics
72 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
73 Packet::Command cmd
= (Packet::Command
)access_idx
;
74 const string
&cstr
= cmd
.toString();
// Per-command, per-thread MSHR hit vector.
77 .init(maxThreadsPerCPU
)
78 .name(name
+ "." + cstr
+ "_mshr_hits")
79 .desc("number of " + cstr
+ " MSHR hits")
80 .flags(total
| nozero
| nonan
)
85 .name(name
+ ".demand_mshr_hits")
86 .desc("number of demand (read+write) MSHR hits")
// Demand hits = read hits + write hits.
89 demandMshrHits
= mshr_hits
[Read
] + mshr_hits
[Write
];
92 .name(name
+ ".overall_mshr_hits")
93 .desc("number of overall MSHR hits")
// Overall hits additionally include software and hardware prefetch hits.
96 overallMshrHits
= demandMshrHits
+ mshr_hits
[Soft_Prefetch
] +
97 mshr_hits
[Hard_Prefetch
];
99 // MSHR miss statistics
100 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
101 Packet::Command cmd
= (Packet::CommandEnum
)access_idx
;
102 const string
&cstr
= cmd
.toString();
104 mshr_misses
[access_idx
]
105 .init(maxThreadsPerCPU
)
106 .name(name
+ "." + cstr
+ "_mshr_misses")
107 .desc("number of " + cstr
+ " MSHR misses")
108 .flags(total
| nozero
| nonan
)
113 .name(name
+ ".demand_mshr_misses")
114 .desc("number of demand (read+write) MSHR misses")
117 demandMshrMisses
= mshr_misses
[Read
] + mshr_misses
[Write
];
120 .name(name
+ ".overall_mshr_misses")
121 .desc("number of overall MSHR misses")
124 overallMshrMisses
= demandMshrMisses
+ mshr_misses
[Soft_Prefetch
] +
125 mshr_misses
[Hard_Prefetch
];
127 // MSHR miss latency statistics
128 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
129 Packet::Command cmd
= (Packet::CommandEnum
)access_idx
;
130 const string
&cstr
= cmd
.toString();
132 mshr_miss_latency
[access_idx
]
133 .init(maxThreadsPerCPU
)
134 .name(name
+ "." + cstr
+ "_mshr_miss_latency")
135 .desc("number of " + cstr
+ " MSHR miss cycles")
136 .flags(total
| nozero
| nonan
)
140 demandMshrMissLatency
141 .name(name
+ ".demand_mshr_miss_latency")
142 .desc("number of demand (read+write) MSHR miss cycles")
145 demandMshrMissLatency
= mshr_miss_latency
[Read
] + mshr_miss_latency
[Write
];
147 overallMshrMissLatency
148 .name(name
+ ".overall_mshr_miss_latency")
149 .desc("number of overall MSHR miss cycles")
152 overallMshrMissLatency
= demandMshrMissLatency
+
153 mshr_miss_latency
[Soft_Prefetch
] + mshr_miss_latency
[Hard_Prefetch
];
155 // MSHR uncacheable statistics
156 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
157 Packet::Command cmd
= (Packet::CommandEnum
)access_idx
;
158 const string
&cstr
= cmd
.toString();
160 mshr_uncacheable
[access_idx
]
161 .init(maxThreadsPerCPU
)
162 .name(name
+ "." + cstr
+ "_mshr_uncacheable")
163 .desc("number of " + cstr
+ " MSHR uncacheable")
164 .flags(total
| nozero
| nonan
)
168 overallMshrUncacheable
169 .name(name
+ ".overall_mshr_uncacheable_misses")
170 .desc("number of overall MSHR uncacheable misses")
173 overallMshrUncacheable
= mshr_uncacheable
[Read
] + mshr_uncacheable
[Write
]
174 + mshr_uncacheable
[Soft_Prefetch
] + mshr_uncacheable
[Hard_Prefetch
];
// NOTE(review): section comment below says "miss latency" but these are the
// uncacheable-latency stats.
176 // MSHR miss latency statistics
177 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
178 Packet::Command cmd
= (Packet::CommandEnum
)access_idx
;
179 const string
&cstr
= cmd
.toString();
181 mshr_uncacheable_lat
[access_idx
]
182 .init(maxThreadsPerCPU
)
183 .name(name
+ "." + cstr
+ "_mshr_uncacheable_latency")
184 .desc("number of " + cstr
+ " MSHR uncacheable cycles")
185 .flags(total
| nozero
| nonan
)
189 overallMshrUncacheableLatency
190 .name(name
+ ".overall_mshr_uncacheable_latency")
191 .desc("number of overall MSHR uncacheable cycles")
194 overallMshrUncacheableLatency
= mshr_uncacheable_lat
[Read
]
195 + mshr_uncacheable_lat
[Write
] + mshr_uncacheable_lat
[Soft_Prefetch
]
196 + mshr_uncacheable_lat
[Hard_Prefetch
];
199 // MSHR access formulas
200 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
201 Packet::Command cmd
= (Packet::CommandEnum
)access_idx
;
202 const string
&cstr
= cmd
.toString();
204 mshrAccesses
[access_idx
]
205 .name(name
+ "." + cstr
+ "_mshr_accesses")
206 .desc("number of " + cstr
+ " mshr accesses(hits+misses)")
207 .flags(total
| nozero
| nonan
)
// Accesses = hits + misses + uncacheable, per command index.
209 mshrAccesses
[access_idx
] =
210 mshr_hits
[access_idx
] + mshr_misses
[access_idx
]
211 + mshr_uncacheable
[access_idx
];
215 .name(name
+ ".demand_mshr_accesses")
216 .desc("number of demand (read+write) mshr accesses")
217 .flags(total
| nozero
| nonan
)
219 demandMshrAccesses
= demandMshrHits
+ demandMshrMisses
;
222 .name(name
+ ".overall_mshr_accesses")
223 .desc("number of overall (read+write) mshr accesses")
224 .flags(total
| nozero
| nonan
)
226 overallMshrAccesses
= overallMshrHits
+ overallMshrMisses
227 + overallMshrUncacheable
;
230 // MSHR miss rate formulas
231 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
232 Packet::Command cmd
= (Packet::CommandEnum
)access_idx
;
233 const string
&cstr
= cmd
.toString();
235 mshrMissRate
[access_idx
]
236 .name(name
+ "." + cstr
+ "_mshr_miss_rate")
237 .desc("mshr miss rate for " + cstr
+ " accesses")
238 .flags(total
| nozero
| nonan
)
// Miss rate = MSHR misses / total cache accesses for that command;
// 'cache' here is the owning BaseCache set via setCache().
241 mshrMissRate
[access_idx
] =
242 mshr_misses
[access_idx
] / cache
->accesses
[access_idx
];
246 .name(name
+ ".demand_mshr_miss_rate")
247 .desc("mshr miss rate for demand accesses")
250 demandMshrMissRate
= demandMshrMisses
/ cache
->demandAccesses
;
253 .name(name
+ ".overall_mshr_miss_rate")
254 .desc("mshr miss rate for overall accesses")
257 overallMshrMissRate
= overallMshrMisses
/ cache
->overallAccesses
;
259 // mshrMiss latency formulas
260 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
261 Packet::Command cmd
= (Packet::CommandEnum
)access_idx
;
262 const string
&cstr
= cmd
.toString();
264 avgMshrMissLatency
[access_idx
]
265 .name(name
+ "." + cstr
+ "_avg_mshr_miss_latency")
266 .desc("average " + cstr
+ " mshr miss latency")
267 .flags(total
| nozero
| nonan
)
// Average latency = accumulated miss cycles / number of misses.
270 avgMshrMissLatency
[access_idx
] =
271 mshr_miss_latency
[access_idx
] / mshr_misses
[access_idx
];
274 demandAvgMshrMissLatency
275 .name(name
+ ".demand_avg_mshr_miss_latency")
276 .desc("average overall mshr miss latency")
279 demandAvgMshrMissLatency
= demandMshrMissLatency
/ demandMshrMisses
;
281 overallAvgMshrMissLatency
282 .name(name
+ ".overall_avg_mshr_miss_latency")
283 .desc("average overall mshr miss latency")
286 overallAvgMshrMissLatency
= overallMshrMissLatency
/ overallMshrMisses
;
288 // mshrUncacheable latency formulas
289 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
290 Packet::Command cmd
= (Packet::CommandEnum
)access_idx
;
291 const string
&cstr
= cmd
.toString();
293 avgMshrUncacheableLatency
[access_idx
]
294 .name(name
+ "." + cstr
+ "_avg_mshr_uncacheable_latency")
295 .desc("average " + cstr
+ " mshr uncacheable latency")
296 .flags(total
| nozero
| nonan
)
299 avgMshrUncacheableLatency
[access_idx
] =
300 mshr_uncacheable_lat
[access_idx
] / mshr_uncacheable
[access_idx
];
303 overallAvgMshrUncacheableLatency
304 .name(name
+ ".overall_avg_mshr_uncacheable_latency")
305 .desc("average overall mshr uncacheable latency")
308 overallAvgMshrUncacheableLatency
= overallMshrUncacheableLatency
/ overallMshrUncacheable
;
// Per-thread counter for MSHR-cap activations.
311 .init(maxThreadsPerCPU
)
312 .name(name
+ ".mshr_cap_events")
313 .desc("number of times MSHR cap was activated")
317 //software prefetching stats
318 soft_prefetch_mshr_full
319 .init(maxThreadsPerCPU
)
// NOTE(review): "instrutions" below is a typo in the stat description
// string; fixing it would change emitted stat output, so it is left as-is.
320 .name(name
+ ".soft_prefetch_mshr_full")
321 .desc("number of mshr full events for SW prefetching instrutions")
325 mshr_no_allocate_misses
326 .name(name
+".no_allocate_misses")
327 .desc("Number of misses that were no-allocate")
333 MissQueue::setCache(BaseCache
*_cache
)
336 blkSize
= cache
->getBlockSize();
340 MissQueue::setPrefetcher(BasePrefetcher
*_prefetcher
)
342 prefetcher
= _prefetcher
;
// Allocate an MSHR for a cache miss and stamp it with a global order tag.
// Cacheable misses are flagged as line fills; non-HW-prefetch misses also
// request the master bus. NOTE(review): the closing braces, the presumable
// "if (mq.isFull())" guard before setBlocked, and the final "return mshr;"
// are on lines missing from this view — confirm against the full file.
346 MissQueue::allocateMiss(Packet
* &pkt
, int size
, Tick time
)
// Allocate an MSHR entry of the requested size for this packet.
348 MSHR
* mshr
= mq
.allocate(pkt
, size
);
// Global monotonic order tag used to service conflicts oldest-first.
349 mshr
->order
= order
++;
350 if (!pkt
->isUncacheable() ){//&& !pkt->isNoAllocate()) {
351 // Mark this as a cache line fill
352 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
// Block the cache when no MSHRs remain (guard condition elided in this view).
355 cache
->setBlocked(Blocked_NoMSHRs
);
357 if (pkt
->cmd
!= Hard_Prefetch
) {
358 //If we need to request the bus (not on HW prefetch), do so
359 cache
->setMasterRequest(Request_MSHR
, time
);
// Allocate a writeback-buffer entry for a write and copy its data into the
// MSHR's packet. Compressed writes replace the MSHR packet's buffer with one
// sized to the compressed (actual) size.
// NOTE(review): the `size` parameter is unused in the visible code — the
// allocation uses pkt->size instead; confirm whether that is intentional.
// NOTE(review): the else introducing the second memcpy, the presumable
// "if (wb.isFull())" guard before setBlocked, and "return mshr;" are on
// lines missing from this view.
366 MissQueue::allocateWrite(Packet
* &pkt
, int size
, Tick time
)
368 MSHR
* mshr
= wb
.allocate(pkt
,pkt
->size
);
369 mshr
->order
= order
++;
// Only touch data buffers when the cache actually models data movement.
370 if (cache
->doData()){
371 if (pkt
->isCompressed()) {
// Replace the MSHR packet's buffer with one of the compressed size.
372 delete [] mshr
->pkt
->data
;
373 mshr
->pkt
->actualSize
= pkt
->actualSize
;
374 mshr
->pkt
->data
= new uint8_t[pkt
->actualSize
];
375 memcpy(mshr
->pkt
->data
, pkt
->data
, pkt
->actualSize
);
// Uncompressed path: copy the full (uncompressed) payload.
377 memcpy(mshr
->pkt
->data
, pkt
->data
, pkt
->size
);
// Block on writeback-buffer exhaustion (guard elided in this view) and
// request the bus for the writeback.
381 cache
->setBlocked(Blocked_NoWBBuffers
);
384 cache
->setMasterRequest(Request_WB
, time
);
391 * @todo Remove SW prefetches on mshr hits.
// Handle a cache miss: notify the prefetcher, then either merge into an
// existing MSHR (MSHR hit), allocate a write-buffer entry (non-allocating
// writes / uncacheable writes), or allocate a fresh MSHR.
// NOTE(review): several closing braces and else-branches are on lines
// missing from this view; the control flow annotated below is inferred from
// the visible fragments — confirm against the full file.
394 MissQueue::handleMiss(Packet
* &pkt
, int blkSize
, Tick time
)
396 // if (!cache->isTopLevel())
397 if (prefetchMiss
) prefetcher
->handleMiss(pkt
, time
);
// Align the miss address down to a block boundary.
400 Addr blkAddr
= pkt
->paddr
& ~(Addr
)(blkSize
-1);
402 if (!pkt
->isUncacheable()) {
// Look for an outstanding MSHR already covering this block.
403 mshr
= mq
.findMatch(blkAddr
, pkt
->req
->asid
);
405 //@todo remove hw_pf here
// MSHR hit: merge this request as an additional target.
406 mshr_hits
[pkt
->cmd
.toIndex()][pkt
->thread_num
]++;
// Targets from different threads make the MSHR thread-anonymous (-1).
407 if (mshr
->threadNum
!= pkt
->thread_num
) {
408 mshr
->threadNum
= -1;
410 mq
.allocateTarget(mshr
, pkt
);
411 if (mshr
->pkt
->isNoAllocate() && !pkt
->isNoAllocate()) {
412 //We are adding an allocate after a no-allocate
413 mshr
->pkt
->flags
&= ~NO_ALLOCATE
;
// Hitting the per-MSHR target cap blocks the cache and prioritizes this
// MSHR so the cap clears as soon as possible.
415 if (mshr
->getNumTargets() == numTarget
) {
417 cache
->setBlocked(Blocked_NoTargets
);
418 mq
.moveToFront(mshr
);
422 if (pkt
->isNoAllocate()) {
423 //Count no-allocate requests differently
424 mshr_no_allocate_misses
++;
427 mshr_misses
[pkt
->cmd
.toIndex()][pkt
->thread_num
]++;
430 //Count uncacheable accesses
431 mshr_uncacheable
[pkt
->cmd
.toIndex()][pkt
->thread_num
]++;
// Writes that will not allocate a line (uncacheable, no-write-allocate,
// or no-response commands) go to the writeback buffer instead of an MSHR.
434 if (pkt
->cmd
.isWrite() && (pkt
->isUncacheable() || !writeAllocate
||
435 pkt
->cmd
.isNoResponse())) {
437 * @todo Add write merging here.
439 mshr
= allocateWrite(pkt
, pkt
->size
, time
);
// NOTE(review): `size` below is not declared in the visible code — it is
// presumably defined (or should be blkSize) on an elided line; verify.
443 mshr
= allocateMiss(pkt
, size
, time
);
// Fetch a block into the cache without a triggering demand request
// (e.g. for a target supplied by the caller). Allocates a fetch MSHR,
// marks it as a line fill, and requests the bus.
// NOTE(review): the tail of the parameter list (after Tick time,) and the
// return statement are on lines missing from this view.
447 MissQueue::fetchBlock(Addr addr
, int asid
, int blk_size
, Tick time
,
// Block-align the address; there must not already be an MSHR for it.
450 Addr blkAddr
= addr
& ~(Addr
)(blk_size
- 1);
451 assert(mq
.findMatch(addr
, asid
) == NULL
);
452 MSHR
*mshr
= mq
.allocateFetch(blkAddr
, asid
, blk_size
, target
);
453 mshr
->order
= order
++;
454 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
// Block if MSHRs are exhausted (guard elided in this view), then request
// the bus for the fetch.
456 cache
->setBlocked(Blocked_NoMSHRs
);
458 cache
->setMasterRequest(Request_MSHR
, time
);
// Select the next packet to send on the bus: normally the oldest miss, but
// writebacks are drained first when the write buffer is full (or no miss is
// ready), and a prefetch may be issued when the miss queue has a free slot.
// NOTE(review): the writeback-selection statements and the final return are
// on lines missing from this view.
463 MissQueue::getPacket()
465 Packet
* pkt
= mq
.getReq();
// Prefer a pending writeback when the write buffer is full (and none are
// already in service), or when there is no ready miss.
466 if (((wb
.isFull() && wb
.inServiceMSHRs
== 0) || !pkt
||
467 pkt
->time
> curTick
) && wb
.havePending()) {
469 // Need to search for earlier miss.
470 MSHR
*mshr
= mq
.findPending(pkt
);
471 if (mshr
&& mshr
->order
< pkt
->senderState
->order
) {
472 // Service misses in order until conflict is cleared.
// Check for a writeback that must precede this request.
477 MSHR
* mshr
= wb
.findPending(pkt
);
478 if (mshr
/*&& mshr->order < pkt->senderState->order*/) {
479 // The only way this happens is if we are
480 // doing a write and we didn't have permissions
481 // then subsequently saw a writeback(owned got evicted)
482 // We need to make sure to perform the writeback first
483 // To preserve the dirty data, then we can issue the write
487 else if (!mq
.isFull()){
488 //If we have a miss queue slot, we can try a prefetch
489 pkt
= prefetcher
->getPacket();
// Prefetch found: account for it and allocate its MSHR immediately.
491 //Update statistic on number of prefetches issued (hwpf_mshr_misses)
492 mshr_misses
[pkt
->cmd
.toIndex()][pkt
->thread_num
]++;
493 //It will request the bus for the future, but should clear that immediately
494 allocateMiss(pkt
, pkt
->size
, curTick
);
496 assert(pkt
); //We should get back a req b/c we just put one in
// Save the packet's current command in its MSHR (so restoreOrigCmd() can
// undo this) and, for cache fills / no-allocate requests, substitute the
// supplied bus command.
// NOTE(review): the body of the trailing if (presumably "pkt->cmd = cmd;")
// is on a line missing from this view.
503 MissQueue::setBusCmd(Packet
* &pkt
, Packet::Command cmd
)
// Every packet here must be attached to an MSHR via senderState.
505 assert(pkt
->senderState
!= 0);
506 MSHR
* mshr
= pkt
->senderState
;
// Remember the original command for later restoration.
507 mshr
->originalCmd
= pkt
->cmd
;
508 if (pkt
->isCacheFill() || pkt
->isNoAllocate())
513 MissQueue::restoreOrigCmd(Packet
* &pkt
)
515 pkt
->cmd
= pkt
->senderState
->originalCmd
;
// Mark the packet's MSHR as in service on the bus. Writes/writebacks (and
// Copy) are accounted to the writeback buffer; everything else to the miss
// queue. If marking in service freed the resource that was blocking the
// cache, the corresponding block cause is cleared.
// NOTE(review): several else-branches and closing braces are on lines
// missing from this view; the unblock logic annotated below is inferred
// from the visible fragments.
519 MissQueue::markInService(Packet
* &pkt
)
521 assert(pkt
->senderState
!= 0);
522 bool unblock
= false;
523 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
526 * @todo Should include MSHRQueue pointer in MSHR to select the correct
// Writeback-buffer path: forwarded writes/writebacks and Copy commands.
529 if ((!pkt
->isCacheFill() && pkt
->cmd
.isWrite()) || pkt
->cmd
== Copy
) {
530 // Forwarding a write/ writeback, don't need to change
// Remember whether the buffer was full before marking in service...
532 unblock
= wb
.isFull();
533 wb
.markInService(pkt
->senderState
);
534 if (!wb
.havePending()){
535 cache
->clearMasterRequest(Request_WB
);
538 // Do we really unblock?
// ...only unblock if it is no longer full afterwards.
539 unblock
= !wb
.isFull();
540 cause
= Blocked_NoWBBuffers
;
// Miss-queue path: same was-full / still-full unblock dance for MSHRs.
543 unblock
= mq
.isFull();
544 mq
.markInService(pkt
->senderState
);
545 if (!mq
.havePending()){
546 cache
->clearMasterRequest(Request_MSHR
);
548 if (pkt
->senderState
->originalCmd
== Hard_Prefetch
) {
549 DPRINTF(HWPrefetch
, "%s:Marking a HW_PF in service\n",
551 //Also clear pending if need be
552 if (!prefetcher
->havePending())
554 cache
->clearMasterRequest(Request_PF
);
558 unblock
= !mq
.isFull();
559 cause
= Blocked_NoMSHRs
;
// Clear the blocked state if a resource was actually freed.
563 cache
->clearBlocked(cause
);
// Handle a response from the bus for a packet attached to an MSHR.
// Cache fills accumulate miss latency and either resend the MSHR (targets
// remain) or free it; uncacheable responses accumulate uncacheable latency
// and respond directly to their (single) target; no-allocate responses may
// satisfy multiple targets. Finally, the cache is unblocked if a resource
// was freed.
// NOTE(review): latency right-hand sides, deallocation calls, else-branches
// and closing braces are on lines missing from this view; annotations below
// are inferred from the visible fragments.
569 MissQueue::handleResponse(Packet
* &pkt
, Tick time
)
571 MSHR
* mshr
= pkt
->senderState
;
572 if (pkt
->senderState
->originalCmd
== Hard_Prefetch
) {
573 DPRINTF(HWPrefetch
, "%s:Handling the response to a HW_PF\n",
577 int num_targets
= mshr
->getNumTargets();
580 bool unblock
= false;
581 bool unblock_target
= false;
582 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
// Cache-fill path: accumulate per-command, per-thread miss latency.
584 if (pkt
->isCacheFill() && !pkt
->isNoAllocate()) {
585 mshr_miss_latency
[mshr
->originalCmd
][pkt
->thread_num
] +=
587 // targets were handled in the cache tags
// Freeing the target-capped MSHR clears the no-targets block.
588 if (mshr
== noTargetMSHR
) {
589 // we always clear at least one target
590 unblock_target
= true;
591 cause
= Blocked_NoTargets
;
595 if (mshr
->hasTargets()) {
596 // Didn't satisfy all the targets, need to resend
597 Packet::Command cmd
= mshr
->getTarget()->cmd
;
598 mq
.markPending(mshr
, cmd
);
// Re-stamp the order so the resend is sequenced after current requests.
599 mshr
->order
= order
++;
600 cache
->setMasterRequest(Request_MSHR
, time
);
// was-full / still-full check around deallocation (elided lines).
603 unblock
= mq
.isFull();
606 unblock
= !mq
.isFull();
607 cause
= Blocked_NoMSHRs
;
// Non-fill path: uncacheable latency accounting.
611 if (pkt
->isUncacheable()) {
612 mshr_uncacheable_lat
[pkt
->cmd
][pkt
->thread_num
] +=
// Uncacheable responses can have at most one target; respond to it.
615 if (mshr
->hasTargets() && pkt
->isUncacheable()) {
616 // Should only have 1 target if we had any
617 assert(num_targets
== 1);
618 Packet
* target
= mshr
->getTarget();
620 if (cache
->doData() && pkt
->cmd
.isRead()) {
621 memcpy(target
->data
, pkt
->data
, target
->size
);
623 cache
->respond(target
, time
);
624 assert(!mshr
->hasTargets());
626 else if (mshr
->hasTargets()) {
627 //Must be a no_allocate with possibly more than one target
628 assert(mshr
->pkt
->isNoAllocate());
// Drain every remaining target, copying read data where modeled.
629 while (mshr
->hasTargets()) {
630 Packet
* target
= mshr
->getTarget();
632 if (cache
->doData() && pkt
->cmd
.isRead()) {
633 memcpy(target
->data
, pkt
->data
, target
->size
);
635 cache
->respond(target
, time
);
639 if (pkt
->cmd
.isWrite()) {
640 // If the write buffer is full, we might unblock now
641 unblock
= wb
.isFull();
644 // Did we really unblock?
645 unblock
= !wb
.isFull();
646 cause
= Blocked_NoWBBuffers
;
649 unblock
= mq
.isFull();
652 unblock
= !mq
.isFull();
653 cause
= Blocked_NoMSHRs
;
657 if (unblock
|| unblock_target
) {
658 cache
->clearBlocked(cause
);
// Squash all outstanding misses belonging to a thread (e.g. on a pipeline
// squash). Clears the no-targets block if the capped MSHR belonged to that
// thread, squashes the thread's MSHRs, and unblocks the cache if space was
// freed. NOTE(review): the statements that set `unblock` and squash the
// noTargetMSHR are on lines missing from this view.
663 MissQueue::squash(int thread_number
)
665 bool unblock
= false;
666 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
// The target-capped MSHR belonged to the squashed thread.
668 if (noTargetMSHR
&& noTargetMSHR
->threadNum
== thread_number
) {
671 cause
= Blocked_NoTargets
;
675 cause
= Blocked_NoMSHRs
;
677 mq
.squash(thread_number
);
// Nothing pending any more: drop the outstanding bus request.
678 if (!mq
.havePending()) {
679 cache
->clearMasterRequest(Request_MSHR
);
681 if (unblock
&& !mq
.isFull()) {
682 cache
->clearBlocked(cause
);
688 MissQueue::findMSHR(Addr addr
, int asid
) const
690 return mq
.findMatch(addr
,asid
);
694 MissQueue::findWrites(Addr addr
, int asid
, vector
<MSHR
*> &writes
) const
696 return wb
.findMatches(addr
,asid
,writes
);
// Build a writeback request for the given block data and queue it in the
// writeback buffer. The size-0 argument to allocateWrite defers to the
// packet's own size. NOTE(review): the tail of the buildWritebackReq call
// (presumably the `compressed` argument) is on a line missing from this view.
700 MissQueue::doWriteback(Addr addr
, int asid
,
701 int size
, uint8_t *data
, bool compressed
)
// Build the writeback packet for this block.
704 Packet
* pkt
= buildWritebackReq(addr
, asid
, size
, data
,
// Count the writeback against the issuing thread.
707 writebacks
[pkt
->thread_num
]++;
709 allocateWrite(pkt
, 0, curTick
);
714 MissQueue::doWriteback(Packet
* &pkt
)
716 writebacks
[pkt
->thread_num
]++;
717 allocateWrite(pkt
, 0, curTick
);
// Allocate an MSHR with a target list for the given block (used when the
// cache needs to collect targets before issuing). The MSHR is marked as a
// line fill. NOTE(review): the presumable "if (mq.isFull())" guard around
// setBlocked and the "return mshr;" are on lines missing from this view.
722 MissQueue::allocateTargetList(Addr addr
, int asid
)
724 MSHR
* mshr
= mq
.allocateTargetList(addr
, asid
, blkSize
);
725 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
// Block the cache when this allocation exhausted the MSHRs.
727 cache
->setBlocked(Blocked_NoMSHRs
);
733 MissQueue::havePending()
735 return mq
.havePending() || wb
.havePending() || prefetcher
->havePending();