2 * Copyright (c) 2003-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Erik Hallnor
34 * Miss and writeback queue definitions.
37 #include "cpu/smt.hh" //for maxThreadsPerCPU
38 #include "mem/cache/base_cache.hh"
39 #include "mem/cache/miss/miss_queue.hh"
40 #include "mem/cache/prefetch/base_prefetcher.hh"
46 * @todo Remove the +16 from the write buffer constructor once we handle
47 * stalling on writebacks due to compression writes.
// Constructor: builds the MSHR queue (mq) with numMSHRs entries and 4
// reserved slots, and the write buffer (wb) with write_buffers entries and
// numMSHRs+1000 reserved slots (see the @todo above about the oversizing).
// Also records the configuration knobs (target count, write-allocate policy,
// prefetch-on-miss) and zeroes the global request ordering counter.
// NOTE(review): this listing is line-mangled; the constructor body/braces
// are not visible here — confirm against the original file.
49 MissQueue::MissQueue(int numMSHRs
, int numTargets
, int write_buffers
,
50 bool write_allocate
, bool prefetch_miss
)
51 : mq(numMSHRs
, 4), wb(write_buffers
,numMSHRs
+1000), numMSHR(numMSHRs
),
52 numTarget(numTargets
), writeBuffers(write_buffers
),
53 writeAllocate(write_allocate
), order(0), prefetchMiss(prefetch_miss
)
// Register all miss-queue statistics under the given cache name prefix.
// Builds per-command stat vectors (MSHR hits, misses, miss latency,
// uncacheable accesses/latency) indexed by memory command, plus
// demand (read+write) and overall (demand + SW/HW prefetch) aggregate
// formulas, miss rates (normalized by the cache's access counts), and
// average-latency formulas.
// NOTE(review): the listing is line-mangled; several stat declarations,
// .flags() calls, and statement terminators are not visible here.
59 MissQueue::regStats(const string
&name
)
// A dummy request/packet pair is built solely so cmdIdxToString() can be
// called to translate command indices into stat-name strings.
61 Request
temp_req((Addr
) NULL
, 4, 0);
62 Packet::Command temp_cmd
= Packet::ReadReq
;
63 Packet
temp_pkt(&temp_req
, temp_cmd
, 0); //@todo Fix command strings so this isn't necessary
66 using namespace Stats
;
// Writeback count, one bin per hardware thread.
69 .init(maxThreadsPerCPU
)
70 .name(name
+ ".writebacks")
71 .desc("number of writebacks")
75 // MSHR hit statistics
76 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
77 Packet::Command cmd
= (Packet::Command
)access_idx
;
78 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
81 .init(maxThreadsPerCPU
)
82 .name(name
+ "." + cstr
+ "_mshr_hits")
83 .desc("number of " + cstr
+ " MSHR hits")
84 .flags(total
| nozero
| nonan
)
89 .name(name
+ ".demand_mshr_hits")
90 .desc("number of demand (read+write) MSHR hits")
// Demand = reads + writes only; prefetches are added in "overall" below.
93 demandMshrHits
= mshr_hits
[Packet::ReadReq
] + mshr_hits
[Packet::WriteReq
];
96 .name(name
+ ".overall_mshr_hits")
97 .desc("number of overall MSHR hits")
100 overallMshrHits
= demandMshrHits
+ mshr_hits
[Packet::SoftPFReq
] +
101 mshr_hits
[Packet::HardPFReq
];
103 // MSHR miss statistics
104 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
105 Packet::Command cmd
= (Packet::Command
)access_idx
;
106 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
108 mshr_misses
[access_idx
]
109 .init(maxThreadsPerCPU
)
110 .name(name
+ "." + cstr
+ "_mshr_misses")
111 .desc("number of " + cstr
+ " MSHR misses")
112 .flags(total
| nozero
| nonan
)
117 .name(name
+ ".demand_mshr_misses")
118 .desc("number of demand (read+write) MSHR misses")
121 demandMshrMisses
= mshr_misses
[Packet::ReadReq
] + mshr_misses
[Packet::WriteReq
];
124 .name(name
+ ".overall_mshr_misses")
125 .desc("number of overall MSHR misses")
128 overallMshrMisses
= demandMshrMisses
+ mshr_misses
[Packet::SoftPFReq
] +
129 mshr_misses
[Packet::HardPFReq
];
131 // MSHR miss latency statistics
132 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
133 Packet::Command cmd
= (Packet::Command
)access_idx
;
134 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
136 mshr_miss_latency
[access_idx
]
137 .init(maxThreadsPerCPU
)
138 .name(name
+ "." + cstr
+ "_mshr_miss_latency")
139 .desc("number of " + cstr
+ " MSHR miss cycles")
140 .flags(total
| nozero
| nonan
)
144 demandMshrMissLatency
145 .name(name
+ ".demand_mshr_miss_latency")
146 .desc("number of demand (read+write) MSHR miss cycles")
149 demandMshrMissLatency
= mshr_miss_latency
[Packet::ReadReq
]
150 + mshr_miss_latency
[Packet::WriteReq
];
152 overallMshrMissLatency
153 .name(name
+ ".overall_mshr_miss_latency")
154 .desc("number of overall MSHR miss cycles")
157 overallMshrMissLatency
= demandMshrMissLatency
+
158 mshr_miss_latency
[Packet::SoftPFReq
] + mshr_miss_latency
[Packet::HardPFReq
];
160 // MSHR uncacheable statistics
161 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
162 Packet::Command cmd
= (Packet::Command
)access_idx
;
163 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
165 mshr_uncacheable
[access_idx
]
166 .init(maxThreadsPerCPU
)
167 .name(name
+ "." + cstr
+ "_mshr_uncacheable")
168 .desc("number of " + cstr
+ " MSHR uncacheable")
169 .flags(total
| nozero
| nonan
)
173 overallMshrUncacheable
174 .name(name
+ ".overall_mshr_uncacheable_misses")
175 .desc("number of overall MSHR uncacheable misses")
178 overallMshrUncacheable
= mshr_uncacheable
[Packet::ReadReq
]
179 + mshr_uncacheable
[Packet::WriteReq
] + mshr_uncacheable
[Packet::SoftPFReq
]
180 + mshr_uncacheable
[Packet::HardPFReq
];
// Uncacheable latency, per command.
182 // MSHR miss latency statistics
183 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
184 Packet::Command cmd
= (Packet::Command
)access_idx
;
185 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
187 mshr_uncacheable_lat
[access_idx
]
188 .init(maxThreadsPerCPU
)
189 .name(name
+ "." + cstr
+ "_mshr_uncacheable_latency")
190 .desc("number of " + cstr
+ " MSHR uncacheable cycles")
191 .flags(total
| nozero
| nonan
)
195 overallMshrUncacheableLatency
196 .name(name
+ ".overall_mshr_uncacheable_latency")
197 .desc("number of overall MSHR uncacheable cycles")
200 overallMshrUncacheableLatency
= mshr_uncacheable_lat
[Packet::ReadReq
]
201 + mshr_uncacheable_lat
[Packet::WriteReq
]
202 + mshr_uncacheable_lat
[Packet::SoftPFReq
]
203 + mshr_uncacheable_lat
[Packet::HardPFReq
];
206 // MSHR access formulas
207 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
208 Packet::Command cmd
= (Packet::Command
)access_idx
;
209 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
211 mshrAccesses
[access_idx
]
212 .name(name
+ "." + cstr
+ "_mshr_accesses")
213 .desc("number of " + cstr
+ " mshr accesses(hits+misses)")
214 .flags(total
| nozero
| nonan
)
// Accesses = hits + misses + uncacheable, per command.
216 mshrAccesses
[access_idx
] =
217 mshr_hits
[access_idx
] + mshr_misses
[access_idx
]
218 + mshr_uncacheable
[access_idx
];
222 .name(name
+ ".demand_mshr_accesses")
223 .desc("number of demand (read+write) mshr accesses")
224 .flags(total
| nozero
| nonan
)
226 demandMshrAccesses
= demandMshrHits
+ demandMshrMisses
;
229 .name(name
+ ".overall_mshr_accesses")
230 .desc("number of overall (read+write) mshr accesses")
231 .flags(total
| nozero
| nonan
)
233 overallMshrAccesses
= overallMshrHits
+ overallMshrMisses
234 + overallMshrUncacheable
;
237 // MSHR miss rate formulas
238 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
239 Packet::Command cmd
= (Packet::Command
)access_idx
;
240 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
242 mshrMissRate
[access_idx
]
243 .name(name
+ "." + cstr
+ "_mshr_miss_rate")
244 .desc("mshr miss rate for " + cstr
+ " accesses")
245 .flags(total
| nozero
| nonan
)
// Miss rate is normalized by the owning cache's access counters.
248 mshrMissRate
[access_idx
] =
249 mshr_misses
[access_idx
] / cache
->accesses
[access_idx
];
253 .name(name
+ ".demand_mshr_miss_rate")
254 .desc("mshr miss rate for demand accesses")
257 demandMshrMissRate
= demandMshrMisses
/ cache
->demandAccesses
;
260 .name(name
+ ".overall_mshr_miss_rate")
261 .desc("mshr miss rate for overall accesses")
264 overallMshrMissRate
= overallMshrMisses
/ cache
->overallAccesses
;
266 // mshrMiss latency formulas
267 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
268 Packet::Command cmd
= (Packet::Command
)access_idx
;
269 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
271 avgMshrMissLatency
[access_idx
]
272 .name(name
+ "." + cstr
+ "_avg_mshr_miss_latency")
273 .desc("average " + cstr
+ " mshr miss latency")
274 .flags(total
| nozero
| nonan
)
277 avgMshrMissLatency
[access_idx
] =
278 mshr_miss_latency
[access_idx
] / mshr_misses
[access_idx
];
281 demandAvgMshrMissLatency
282 .name(name
+ ".demand_avg_mshr_miss_latency")
283 .desc("average overall mshr miss latency")
286 demandAvgMshrMissLatency
= demandMshrMissLatency
/ demandMshrMisses
;
288 overallAvgMshrMissLatency
289 .name(name
+ ".overall_avg_mshr_miss_latency")
290 .desc("average overall mshr miss latency")
293 overallAvgMshrMissLatency
= overallMshrMissLatency
/ overallMshrMisses
;
295 // mshrUncacheable latency formulas
296 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
297 Packet::Command cmd
= (Packet::Command
)access_idx
;
298 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
300 avgMshrUncacheableLatency
[access_idx
]
301 .name(name
+ "." + cstr
+ "_avg_mshr_uncacheable_latency")
302 .desc("average " + cstr
+ " mshr uncacheable latency")
303 .flags(total
| nozero
| nonan
)
306 avgMshrUncacheableLatency
[access_idx
] =
307 mshr_uncacheable_lat
[access_idx
] / mshr_uncacheable
[access_idx
];
310 overallAvgMshrUncacheableLatency
311 .name(name
+ ".overall_avg_mshr_uncacheable_latency")
312 .desc("average overall mshr uncacheable latency")
315 overallAvgMshrUncacheableLatency
= overallMshrUncacheableLatency
/ overallMshrUncacheable
;
// Count of times the MSHR capacity limit was hit.
318 .init(maxThreadsPerCPU
)
319 .name(name
+ ".mshr_cap_events")
320 .desc("number of times MSHR cap was activated")
324 //software prefetching stats
325 soft_prefetch_mshr_full
326 .init(maxThreadsPerCPU
)
327 .name(name
+ ".soft_prefetch_mshr_full")
328 .desc("number of mshr full events for SW prefetching instrutions")
332 mshr_no_allocate_misses
333 .name(name
+".no_allocate_misses")
334 .desc("Number of misses that were no-allocate")
// Attach this miss queue to its owning cache and cache the block size
// locally for address masking / MSHR allocation.
// NOTE(review): the listing is line-mangled; the assignment of the _cache
// parameter to the `cache` member is presumably here but not visible —
// confirm against the original file.
340 MissQueue::setCache(BaseCache
*_cache
)
343 blkSize
= cache
->getBlockSize();
347 MissQueue::setPrefetcher(BasePrefetcher
*_prefetcher
)
349 prefetcher
= _prefetcher
;
// Allocate an MSHR for a miss on the given packet, tag it with the next
// global order number, mark cacheable misses as line fills, block the cache
// if MSHRs ran out, and (except for HW prefetches) request the bus.
// NOTE(review): the listing is line-mangled; the guard around
// setBlocked(Blocked_NoMSHRs) and the trailing return are not visible.
353 MissQueue::allocateMiss(Packet
* &pkt
, int size
, Tick time
)
355 MSHR
* mshr
= mq
.allocate(pkt
, blkSize
);
// Order stamps give a total order for servicing misses oldest-first.
356 mshr
->order
= order
++;
357 if (!pkt
->req
->isUncacheable() ){//&& !pkt->isNoAllocate()) {
358 // Mark this as a cache line fill
359 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
362 cache
->setBlocked(Blocked_NoMSHRs
);
364 if (pkt
->cmd
!= Packet::HardPFReq
) {
365 //If we need to request the bus (not on HW prefetch), do so
366 cache
->setMasterRequest(Request_MSHR
, time
);
// Allocate a write-buffer entry for the given write/writeback packet,
// stamp its order, copy the data into the buffered packet, block the cache
// if the write buffer filled up, and request the bus for the writeback.
// NOTE(review): the listing is line-mangled; the branch structure around
// the compression path and the trailing return are not visible.
373 MissQueue::allocateWrite(Packet
* &pkt
, int size
, Tick time
)
375 MSHR
* mshr
= wb
.allocate(pkt
,size
);
376 mshr
->order
= order
++;
378 //REMOVING COMPRESSION FOR NOW
// Compressed path: replace the buffered packet's data with a copy sized
// to the compressed (actual) size.
380 if (pkt
->isCompressed()) {
381 mshr
->pkt
->deleteData();
382 mshr
->pkt
->actualSize
= pkt
->actualSize
;
383 mshr
->pkt
->data
= new uint8_t[pkt
->actualSize
];
384 memcpy(mshr
->pkt
->data
, pkt
->data
, pkt
->actualSize
);
// Uncompressed path: plain byte copy of the packet payload.
387 memcpy(mshr
->pkt
->getPtr
<uint8_t>(), pkt
->getPtr
<uint8_t>(), pkt
->getSize());
391 cache
->setBlocked(Blocked_NoWBBuffers
);
394 cache
->setMasterRequest(Request_WB
, time
);
401 * @todo Remove SW prefetches on mshr hits.
// Handle a cache miss: notify the prefetcher, then either merge the request
// into an existing MSHR for the same block (an "MSHR hit"), or allocate a
// new MSHR / write-buffer entry depending on cacheability and write policy.
// NOTE(review): the listing is line-mangled; several braces and else-arms
// are not visible here.
404 MissQueue::handleMiss(Packet
* &pkt
, int blkSize
, Tick time
)
406 // if (!cache->isTopLevel())
407 if (prefetchMiss
) prefetcher
->handleMiss(pkt
, time
);
// Mask the address down to its block-aligned base.
410 Addr blkAddr
= pkt
->getAddr() & ~(Addr
)(blkSize
-1);
412 if (!pkt
->req
->isUncacheable()) {
413 mshr
= mq
.findMatch(blkAddr
);
// A match means an outstanding miss to this block already exists:
// record an MSHR hit and add this packet as another target.
415 //@todo remove hw_pf here
416 mshr_hits
[pkt
->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
// -1 marks the MSHR as shared by multiple threads.
417 if (mshr
->threadNum
!= 0/*pkt->req->getThreadNum()*/) {
418 mshr
->threadNum
= -1;
420 mq
.allocateTarget(mshr
, pkt
);
421 if (mshr
->pkt
->isNoAllocate() && !pkt
->isNoAllocate()) {
422 //We are adding an allocate after a no-allocate
423 mshr
->pkt
->flags
&= ~NO_ALLOCATE
;
// Hitting the per-MSHR target limit blocks the cache and prioritizes
// this MSHR so its targets drain first.
425 if (mshr
->getNumTargets() == numTarget
) {
427 cache
->setBlocked(Blocked_NoTargets
);
428 mq
.moveToFront(mshr
);
432 if (pkt
->isNoAllocate()) {
433 //Count no-allocate requests differently
434 mshr_no_allocate_misses
++;
437 mshr_misses
[pkt
->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
440 //Count uncacheable accesses
441 mshr_uncacheable
[pkt
->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
442 size
= pkt
->getSize();
// Writes that won't be allocated in the cache (uncacheable, no
// write-allocate, or no response needed) go through the write buffer.
444 if (pkt
->isWrite() && (pkt
->req
->isUncacheable() || !writeAllocate
||
445 !pkt
->needsResponse())) {
447 * @todo Add write merging here.
449 mshr
= allocateWrite(pkt
, pkt
->getSize(), time
);
// Everything else allocates a regular miss MSHR.
453 mshr
= allocateMiss(pkt
, blkSize
, time
);
// Fetch a block (e.g. for an internal fill) by allocating a fetch MSHR for
// the block-aligned address, marking it as a line fill, blocking the cache
// if MSHRs ran out, and requesting the bus.
// NOTE(review): the listing is line-mangled; the remainder of the parameter
// list (the target argument) and some guard/brace lines are not visible.
457 MissQueue::fetchBlock(Addr addr
, int blk_size
, Tick time
,
// Block-align the requested address.
460 Addr blkAddr
= addr
& ~(Addr
)(blk_size
- 1);
// Caller guarantees no outstanding miss already covers this address.
461 assert(mq
.findMatch(addr
) == NULL
);
462 MSHR
*mshr
= mq
.allocateFetch(blkAddr
, blk_size
, target
);
463 mshr
->order
= order
++;
464 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
466 cache
->setBlocked(Blocked_NoMSHRs
);
468 cache
->setMasterRequest(Request_MSHR
, time
);
// Select the next packet to send on the bus. Prefers the oldest pending
// miss, but services writebacks when the write buffer is full (or holds an
// older entry), and falls back to asking the prefetcher for a packet when
// the miss queue has a free slot.
// NOTE(review): the listing is line-mangled; several else-arms, braces, and
// the final return are not visible here.
473 MissQueue::getPacket()
475 Packet
* pkt
= mq
.getReq();
// Consider the write buffer when it is full with nothing in service, or
// when there is no ready miss, as long as it has pending entries.
476 if (((wb
.isFull() && wb
.inServiceMSHRs
== 0) || !pkt
||
477 pkt
->time
> curTick
) && wb
.havePending()) {
479 // Need to search for earlier miss.
480 MSHR
*mshr
= mq
.findPending(pkt
);
481 if (mshr
&& mshr
->order
< ((MSHR
*)(pkt
->senderState
))->order
) {
482 // Service misses in order until conflict is cleared.
487 MSHR
* mshr
= wb
.findPending(pkt
);
488 if (mshr
/*&& mshr->order < pkt->senderState->order*/) {
489 // The only way this happens is if we are
490 // doing a write and we didn't have permissions
491 // then subsequently saw a writeback(owned got evicted)
492 // We need to make sure to perform the writeback first
493 // To preserve the dirty data, then we can issue the write
497 else if (!mq
.isFull()){
498 //If we have a miss queue slot, we can try a prefetch
499 pkt
= prefetcher
->getPacket();
501 //Update statistic on number of prefetches issued (hwpf_mshr_misses)
502 mshr_misses
[pkt
->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
503 //It will request the bus for the future, but should clear that immediately
504 allocateMiss(pkt
, pkt
->getSize(), curTick
);
506 assert(pkt
); //We should get back a req b/c we just put one in
513 MissQueue::setBusCmd(Packet
* &pkt
, Packet::Command cmd
)
515 assert(pkt
->senderState
!= 0);
516 MSHR
* mshr
= (MSHR
*)pkt
->senderState
;
517 mshr
->originalCmd
= pkt
->cmd
;
518 if (cmd
== Packet::UpgradeReq
|| cmd
== Packet::InvalidateReq
) {
519 pkt
->flags
|= NO_ALLOCATE
;
520 pkt
->flags
&= ~CACHE_LINE_FILL
;
522 else if (!pkt
->req
->isUncacheable() && !pkt
->isNoAllocate() &&
523 (cmd
& (1 << 6)/*NeedsResponse*/)) {
524 pkt
->flags
|= CACHE_LINE_FILL
;
526 if (pkt
->isCacheFill() || pkt
->isNoAllocate())
531 MissQueue::restoreOrigCmd(Packet
* &pkt
)
533 pkt
->cmd
= ((MSHR
*)(pkt
->senderState
))->originalCmd
;
// Mark the given packet/MSHR as in service on the bus. Writebacks are
// marked in the write buffer, everything else in the miss queue; the
// corresponding master request is cleared when nothing is pending, and the
// cache is unblocked if marking in-service freed the last full resource.
// NOTE(review): the listing is line-mangled; several else-arms, braces, and
// guard lines are not visible here.
537 MissQueue::markInService(Packet
* &pkt
, MSHR
* mshr
)
539 bool unblock
= false;
540 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
543 * @todo Should include MSHRQueue pointer in MSHR to select the correct
// Forwarded writes / writebacks live in the write buffer.
546 if ((!pkt
->isCacheFill() && pkt
->isWrite())) {
547 // Forwarding a write/ writeback, don't need to change
// Snapshot fullness before marking, to detect a full->not-full edge.
549 unblock
= wb
.isFull();
550 wb
.markInService(mshr
);
551 if (!wb
.havePending()){
552 cache
->clearMasterRequest(Request_WB
);
555 // Do we really unblock?
556 unblock
= !wb
.isFull();
557 cause
= Blocked_NoWBBuffers
;
// Non-write path: the MSHR queue.
560 unblock
= mq
.isFull();
561 mq
.markInService(mshr
);
562 if (!mq
.havePending()){
563 cache
->clearMasterRequest(Request_MSHR
);
565 if (mshr
->originalCmd
== Packet::HardPFReq
) {
566 DPRINTF(HWPrefetch
, "%s:Marking a HW_PF in service\n",
568 //Also clear pending if need be
569 if (!prefetcher
->havePending())
571 cache
->clearMasterRequest(Request_PF
);
575 unblock
= !mq
.isFull();
576 cause
= Blocked_NoMSHRs
;
580 cache
->clearBlocked(cause
);
// Handle a response returning from the bus for an outstanding MSHR.
// Cache-fill responses accumulate miss latency and may be re-sent if
// targets remain; uncacheable/no-allocate responses copy data to their
// target packet(s) and respond directly. Finally the cache is unblocked if
// the response freed the last full resource.
// NOTE(review): the listing is line-mangled; the right-hand sides of the
// latency accumulations, deallocation calls, and several else-arms are not
// visible here.
586 MissQueue::handleResponse(Packet
* &pkt
, Tick time
)
588 MSHR
* mshr
= (MSHR
*)pkt
->senderState
;
589 if (((MSHR
*)(pkt
->senderState
))->originalCmd
== Packet::HardPFReq
) {
590 DPRINTF(HWPrefetch
, "%s:Handling the response to a HW_PF\n",
594 int num_targets
= mshr
->getNumTargets();
597 bool unblock
= false;
598 bool unblock_target
= false;
599 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
// Cache-fill path: accumulate miss latency for the original command.
601 if (pkt
->isCacheFill() && !pkt
->isNoAllocate()) {
602 mshr_miss_latency
[mshr
->originalCmd
][0/*pkt->req->getThreadNum()*/] +=
604 // targets were handled in the cache tags
605 if (mshr
== noTargetMSHR
) {
606 // we always clear at least one target
607 unblock_target
= true;
608 cause
= Blocked_NoTargets
;
612 if (mshr
->hasTargets()) {
613 // Didn't satisfy all the targets, need to resend
614 Packet::Command cmd
= mshr
->getTarget()->cmd
;
615 mq
.markPending(mshr
, cmd
);
// Re-stamp the order so the resend is sequenced after current requests.
616 mshr
->order
= order
++;
617 cache
->setMasterRequest(Request_MSHR
, time
);
620 unblock
= mq
.isFull();
623 unblock
= !mq
.isFull();
624 cause
= Blocked_NoMSHRs
;
// Non-fill path: uncacheable latency accounting.
628 if (pkt
->req
->isUncacheable()) {
629 mshr_uncacheable_lat
[pkt
->cmd
][0/*pkt->req->getThreadNum()*/] +=
632 if (mshr
->hasTargets() && pkt
->req
->isUncacheable()) {
633 // Should only have 1 target if we had any
634 assert(num_targets
== 1);
635 Packet
* target
= mshr
->getTarget();
638 memcpy(target
->getPtr
<uint8_t>(), pkt
->getPtr
<uint8_t>(),
641 cache
->respond(target
, time
);
642 assert(!mshr
->hasTargets());
644 else if (mshr
->hasTargets()) {
645 //Must be a no_allocate with possibly more than one target
646 assert(mshr
->pkt
->isNoAllocate());
// Drain every queued target with a copy of the response data.
647 while (mshr
->hasTargets()) {
648 Packet
* target
= mshr
->getTarget();
651 memcpy(target
->getPtr
<uint8_t>(), pkt
->getPtr
<uint8_t>(),
654 cache
->respond(target
, time
);
658 if (pkt
->isWrite()) {
659 // If the write buffer is full, we might unblock now
660 unblock
= wb
.isFull();
663 // Did we really unblock?
664 unblock
= !wb
.isFull();
665 cause
= Blocked_NoWBBuffers
;
668 unblock
= mq
.isFull();
671 unblock
= !mq
.isFull();
672 cause
= Blocked_NoMSHRs
;
676 if (unblock
|| unblock_target
) {
677 cache
->clearBlocked(cause
);
// Squash all outstanding misses belonging to the given thread. If the
// no-target MSHR belonged to that thread, the NoTargets block is released;
// the MSHR-queue entries are squashed, the master request is cleared when
// nothing remains pending, and the cache is unblocked if appropriate.
// NOTE(review): the listing is line-mangled; the assignments that set
// `unblock` and some braces are not visible here.
682 MissQueue::squash(int threadNum
)
684 bool unblock
= false;
685 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
687 if (noTargetMSHR
&& noTargetMSHR
->threadNum
== threadNum
) {
690 cause
= Blocked_NoTargets
;
694 cause
= Blocked_NoMSHRs
;
696 mq
.squash(threadNum
);
697 if (!mq
.havePending()) {
698 cache
->clearMasterRequest(Request_MSHR
);
700 if (unblock
&& !mq
.isFull()) {
701 cache
->clearBlocked(cause
);
707 MissQueue::findMSHR(Addr addr
) const
709 return mq
.findMatch(addr
);
713 MissQueue::findWrites(Addr addr
, vector
<MSHR
*> &writes
) const
715 return wb
.findMatches(addr
,writes
);
// Issue a writeback for raw data: build a new Writeback request/packet,
// copy the data in, flag it as compressed when requested, bump the
// writeback statistic, and push it into the write buffer.
// NOTE(review): the listing is line-mangled; the `if (compressed)` guard
// around the COMPRESSED flag and some braces are not visible here.
719 MissQueue::doWriteback(Addr addr
,
720 int size
, uint8_t *data
, bool compressed
)
// Build the writeback packet; -1 is the (unused) response port id here.
723 Request
* req
= new Request(addr
, size
, 0);
724 Packet
* pkt
= new Packet(req
, Packet::Writeback
, -1);
727 memcpy(pkt
->getPtr
<uint8_t>(), data
, size
);
731 pkt
->flags
|= COMPRESSED
;
734 ///All writebacks charged to same thread @todo figure this out
735 writebacks
[0/*pkt->req->getThreadNum()*/]++;
737 allocateWrite(pkt
, 0, curTick
);
742 MissQueue::doWriteback(Packet
* &pkt
)
744 writebacks
[0/*pkt->req->getThreadNum()*/]++;
745 allocateWrite(pkt
, 0, curTick
);
// Allocate an MSHR with a target list for the given address and mark it as
// a cache-line fill; blocks the cache if that exhausted the MSHRs.
// NOTE(review): the listing is line-mangled; the fullness guard around
// setBlocked and the return of the MSHR are not visible here.
750 MissQueue::allocateTargetList(Addr addr
)
752 MSHR
* mshr
= mq
.allocateTargetList(addr
, blkSize
);
753 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
755 cache
->setBlocked(Blocked_NoMSHRs
);
761 MissQueue::havePending()
763 return mq
.havePending() || wb
.havePending() || prefetcher
->havePending();