2 * Copyright (c) 2003-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Erik Hallnor
34 * Miss and writeback queue definitions.
37 #include "cpu/smt.hh" //for maxThreadsPerCPU
38 #include "mem/cache/base_cache.hh"
39 #include "mem/cache/miss/miss_queue.hh"
40 #include "mem/cache/prefetch/base_prefetcher.hh"
 * @todo Remove the +16 from the write buffer constructor once we handle
 * stalling on writebacks due to compression writes.
49 MissQueue::MissQueue(int numMSHRs
, int numTargets
, int write_buffers
,
50 bool write_allocate
, bool prefetch_miss
)
51 : mq(numMSHRs
, 4), wb(write_buffers
,numMSHRs
+1000), numMSHR(numMSHRs
),
52 numTarget(numTargets
), writeBuffers(write_buffers
),
53 writeAllocate(write_allocate
), order(0), prefetchMiss(prefetch_miss
)
59 MissQueue::regStats(const string
&name
)
61 Request
temp_req((Addr
) NULL
, 4, 0);
62 Packet::Command temp_cmd
= Packet::ReadReq
;
63 Packet
temp_pkt(&temp_req
, temp_cmd
, 0); //@todo FIx command strings so this isn't neccessary
66 using namespace Stats
;
69 .init(maxThreadsPerCPU
)
70 .name(name
+ ".writebacks")
71 .desc("number of writebacks")
75 // MSHR hit statistics
76 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
77 Packet::Command cmd
= (Packet::Command
)access_idx
;
78 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
81 .init(maxThreadsPerCPU
)
82 .name(name
+ "." + cstr
+ "_mshr_hits")
83 .desc("number of " + cstr
+ " MSHR hits")
84 .flags(total
| nozero
| nonan
)
89 .name(name
+ ".demand_mshr_hits")
90 .desc("number of demand (read+write) MSHR hits")
93 demandMshrHits
= mshr_hits
[Packet::ReadReq
] + mshr_hits
[Packet::WriteReq
];
96 .name(name
+ ".overall_mshr_hits")
97 .desc("number of overall MSHR hits")
100 overallMshrHits
= demandMshrHits
+ mshr_hits
[Packet::SoftPFReq
] +
101 mshr_hits
[Packet::HardPFReq
];
103 // MSHR miss statistics
104 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
105 Packet::Command cmd
= (Packet::Command
)access_idx
;
106 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
108 mshr_misses
[access_idx
]
109 .init(maxThreadsPerCPU
)
110 .name(name
+ "." + cstr
+ "_mshr_misses")
111 .desc("number of " + cstr
+ " MSHR misses")
112 .flags(total
| nozero
| nonan
)
117 .name(name
+ ".demand_mshr_misses")
118 .desc("number of demand (read+write) MSHR misses")
121 demandMshrMisses
= mshr_misses
[Packet::ReadReq
] + mshr_misses
[Packet::WriteReq
];
124 .name(name
+ ".overall_mshr_misses")
125 .desc("number of overall MSHR misses")
128 overallMshrMisses
= demandMshrMisses
+ mshr_misses
[Packet::SoftPFReq
] +
129 mshr_misses
[Packet::HardPFReq
];
131 // MSHR miss latency statistics
132 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
133 Packet::Command cmd
= (Packet::Command
)access_idx
;
134 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
136 mshr_miss_latency
[access_idx
]
137 .init(maxThreadsPerCPU
)
138 .name(name
+ "." + cstr
+ "_mshr_miss_latency")
139 .desc("number of " + cstr
+ " MSHR miss cycles")
140 .flags(total
| nozero
| nonan
)
144 demandMshrMissLatency
145 .name(name
+ ".demand_mshr_miss_latency")
146 .desc("number of demand (read+write) MSHR miss cycles")
149 demandMshrMissLatency
= mshr_miss_latency
[Packet::ReadReq
]
150 + mshr_miss_latency
[Packet::WriteReq
];
152 overallMshrMissLatency
153 .name(name
+ ".overall_mshr_miss_latency")
154 .desc("number of overall MSHR miss cycles")
157 overallMshrMissLatency
= demandMshrMissLatency
+
158 mshr_miss_latency
[Packet::SoftPFReq
] + mshr_miss_latency
[Packet::HardPFReq
];
160 // MSHR uncacheable statistics
161 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
162 Packet::Command cmd
= (Packet::Command
)access_idx
;
163 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
165 mshr_uncacheable
[access_idx
]
166 .init(maxThreadsPerCPU
)
167 .name(name
+ "." + cstr
+ "_mshr_uncacheable")
168 .desc("number of " + cstr
+ " MSHR uncacheable")
169 .flags(total
| nozero
| nonan
)
173 overallMshrUncacheable
174 .name(name
+ ".overall_mshr_uncacheable_misses")
175 .desc("number of overall MSHR uncacheable misses")
178 overallMshrUncacheable
= mshr_uncacheable
[Packet::ReadReq
]
179 + mshr_uncacheable
[Packet::WriteReq
] + mshr_uncacheable
[Packet::SoftPFReq
]
180 + mshr_uncacheable
[Packet::HardPFReq
];
182 // MSHR miss latency statistics
183 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
184 Packet::Command cmd
= (Packet::Command
)access_idx
;
185 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
187 mshr_uncacheable_lat
[access_idx
]
188 .init(maxThreadsPerCPU
)
189 .name(name
+ "." + cstr
+ "_mshr_uncacheable_latency")
190 .desc("number of " + cstr
+ " MSHR uncacheable cycles")
191 .flags(total
| nozero
| nonan
)
195 overallMshrUncacheableLatency
196 .name(name
+ ".overall_mshr_uncacheable_latency")
197 .desc("number of overall MSHR uncacheable cycles")
200 overallMshrUncacheableLatency
= mshr_uncacheable_lat
[Packet::ReadReq
]
201 + mshr_uncacheable_lat
[Packet::WriteReq
]
202 + mshr_uncacheable_lat
[Packet::SoftPFReq
]
203 + mshr_uncacheable_lat
[Packet::HardPFReq
];
206 // MSHR access formulas
207 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
208 Packet::Command cmd
= (Packet::Command
)access_idx
;
209 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
211 mshrAccesses
[access_idx
]
212 .name(name
+ "." + cstr
+ "_mshr_accesses")
213 .desc("number of " + cstr
+ " mshr accesses(hits+misses)")
214 .flags(total
| nozero
| nonan
)
216 mshrAccesses
[access_idx
] =
217 mshr_hits
[access_idx
] + mshr_misses
[access_idx
]
218 + mshr_uncacheable
[access_idx
];
222 .name(name
+ ".demand_mshr_accesses")
223 .desc("number of demand (read+write) mshr accesses")
224 .flags(total
| nozero
| nonan
)
226 demandMshrAccesses
= demandMshrHits
+ demandMshrMisses
;
229 .name(name
+ ".overall_mshr_accesses")
230 .desc("number of overall (read+write) mshr accesses")
231 .flags(total
| nozero
| nonan
)
233 overallMshrAccesses
= overallMshrHits
+ overallMshrMisses
234 + overallMshrUncacheable
;
237 // MSHR miss rate formulas
238 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
239 Packet::Command cmd
= (Packet::Command
)access_idx
;
240 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
242 mshrMissRate
[access_idx
]
243 .name(name
+ "." + cstr
+ "_mshr_miss_rate")
244 .desc("mshr miss rate for " + cstr
+ " accesses")
245 .flags(total
| nozero
| nonan
)
248 mshrMissRate
[access_idx
] =
249 mshr_misses
[access_idx
] / cache
->accesses
[access_idx
];
253 .name(name
+ ".demand_mshr_miss_rate")
254 .desc("mshr miss rate for demand accesses")
257 demandMshrMissRate
= demandMshrMisses
/ cache
->demandAccesses
;
260 .name(name
+ ".overall_mshr_miss_rate")
261 .desc("mshr miss rate for overall accesses")
264 overallMshrMissRate
= overallMshrMisses
/ cache
->overallAccesses
;
266 // mshrMiss latency formulas
267 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
268 Packet::Command cmd
= (Packet::Command
)access_idx
;
269 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
271 avgMshrMissLatency
[access_idx
]
272 .name(name
+ "." + cstr
+ "_avg_mshr_miss_latency")
273 .desc("average " + cstr
+ " mshr miss latency")
274 .flags(total
| nozero
| nonan
)
277 avgMshrMissLatency
[access_idx
] =
278 mshr_miss_latency
[access_idx
] / mshr_misses
[access_idx
];
281 demandAvgMshrMissLatency
282 .name(name
+ ".demand_avg_mshr_miss_latency")
283 .desc("average overall mshr miss latency")
286 demandAvgMshrMissLatency
= demandMshrMissLatency
/ demandMshrMisses
;
288 overallAvgMshrMissLatency
289 .name(name
+ ".overall_avg_mshr_miss_latency")
290 .desc("average overall mshr miss latency")
293 overallAvgMshrMissLatency
= overallMshrMissLatency
/ overallMshrMisses
;
295 // mshrUncacheable latency formulas
296 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
297 Packet::Command cmd
= (Packet::Command
)access_idx
;
298 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
300 avgMshrUncacheableLatency
[access_idx
]
301 .name(name
+ "." + cstr
+ "_avg_mshr_uncacheable_latency")
302 .desc("average " + cstr
+ " mshr uncacheable latency")
303 .flags(total
| nozero
| nonan
)
306 avgMshrUncacheableLatency
[access_idx
] =
307 mshr_uncacheable_lat
[access_idx
] / mshr_uncacheable
[access_idx
];
310 overallAvgMshrUncacheableLatency
311 .name(name
+ ".overall_avg_mshr_uncacheable_latency")
312 .desc("average overall mshr uncacheable latency")
315 overallAvgMshrUncacheableLatency
= overallMshrUncacheableLatency
/ overallMshrUncacheable
;
318 .init(maxThreadsPerCPU
)
319 .name(name
+ ".mshr_cap_events")
320 .desc("number of times MSHR cap was activated")
324 //software prefetching stats
325 soft_prefetch_mshr_full
326 .init(maxThreadsPerCPU
)
327 .name(name
+ ".soft_prefetch_mshr_full")
328 .desc("number of mshr full events for SW prefetching instrutions")
332 mshr_no_allocate_misses
333 .name(name
+".no_allocate_misses")
334 .desc("Number of misses that were no-allocate")
340 MissQueue::setCache(BaseCache
*_cache
)
343 blkSize
= cache
->getBlockSize();
347 MissQueue::setPrefetcher(BasePrefetcher
*_prefetcher
)
349 prefetcher
= _prefetcher
;
353 MissQueue::allocateMiss(Packet
* &pkt
, int size
, Tick time
)
355 MSHR
* mshr
= mq
.allocate(pkt
, blkSize
);
356 mshr
->order
= order
++;
357 if (!pkt
->req
->isUncacheable() ){//&& !pkt->isNoAllocate()) {
358 // Mark this as a cache line fill
359 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
362 cache
->setBlocked(Blocked_NoMSHRs
);
364 if (pkt
->cmd
!= Packet::HardPFReq
) {
365 //If we need to request the bus (not on HW prefetch), do so
366 cache
->setMasterRequest(Request_MSHR
, time
);
373 MissQueue::allocateWrite(Packet
* &pkt
, int size
, Tick time
)
375 MSHR
* mshr
= wb
.allocate(pkt
,blkSize
);
376 mshr
->order
= order
++;
378 //REMOVING COMPRESSION FOR NOW
380 if (pkt
->isCompressed()) {
381 mshr
->pkt
->deleteData();
382 mshr
->pkt
->actualSize
= pkt
->actualSize
;
383 mshr
->pkt
->data
= new uint8_t[pkt
->actualSize
];
384 memcpy(mshr
->pkt
->data
, pkt
->data
, pkt
->actualSize
);
387 memcpy(mshr
->pkt
->getPtr
<uint8_t>(), pkt
->getPtr
<uint8_t>(), pkt
->getSize());
391 cache
->setBlocked(Blocked_NoWBBuffers
);
394 cache
->setMasterRequest(Request_WB
, time
);
401 * @todo Remove SW prefetches on mshr hits.
404 MissQueue::handleMiss(Packet
* &pkt
, int blkSize
, Tick time
)
406 // if (!cache->isTopLevel())
407 if (prefetchMiss
) prefetcher
->handleMiss(pkt
, time
);
410 Addr blkAddr
= pkt
->getAddr() & ~(Addr
)(blkSize
-1);
412 if (!pkt
->req
->isUncacheable()) {
413 mshr
= mq
.findMatch(blkAddr
);
415 //@todo remove hw_pf here
416 mshr_hits
[pkt
->cmdToIndex()][pkt
->req
->getThreadNum()]++;
417 if (mshr
->threadNum
!= pkt
->req
->getThreadNum()) {
418 mshr
->threadNum
= -1;
420 mq
.allocateTarget(mshr
, pkt
);
421 if (mshr
->pkt
->isNoAllocate() && !pkt
->isNoAllocate()) {
422 //We are adding an allocate after a no-allocate
423 mshr
->pkt
->flags
&= ~NO_ALLOCATE
;
425 if (mshr
->getNumTargets() == numTarget
) {
427 cache
->setBlocked(Blocked_NoTargets
);
428 mq
.moveToFront(mshr
);
432 if (pkt
->isNoAllocate()) {
433 //Count no-allocate requests differently
434 mshr_no_allocate_misses
++;
437 mshr_misses
[pkt
->cmdToIndex()][pkt
->req
->getThreadNum()]++;
440 //Count uncacheable accesses
441 mshr_uncacheable
[pkt
->cmdToIndex()][pkt
->req
->getThreadNum()]++;
442 size
= pkt
->getSize();
444 if (pkt
->isWrite() && (pkt
->req
->isUncacheable() || !writeAllocate
||
445 !pkt
->needsResponse())) {
447 * @todo Add write merging here.
449 mshr
= allocateWrite(pkt
, blkSize
, time
);
453 mshr
= allocateMiss(pkt
, blkSize
, time
);
457 MissQueue::fetchBlock(Addr addr
, int blk_size
, Tick time
,
460 Addr blkAddr
= addr
& ~(Addr
)(blk_size
- 1);
461 assert(mq
.findMatch(addr
) == NULL
);
462 MSHR
*mshr
= mq
.allocateFetch(blkAddr
, blk_size
, target
);
463 mshr
->order
= order
++;
464 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
466 cache
->setBlocked(Blocked_NoMSHRs
);
468 cache
->setMasterRequest(Request_MSHR
, time
);
473 MissQueue::getPacket()
475 Packet
* pkt
= mq
.getReq();
476 if (((wb
.isFull() && wb
.inServiceMSHRs
== 0) || !pkt
||
477 pkt
->time
> curTick
) && wb
.havePending()) {
479 // Need to search for earlier miss.
480 MSHR
*mshr
= mq
.findPending(pkt
);
481 if (mshr
&& mshr
->order
< ((MSHR
*)(pkt
->senderState
))->order
) {
482 // Service misses in order until conflict is cleared.
487 MSHR
* mshr
= wb
.findPending(pkt
);
488 if (mshr
/*&& mshr->order < pkt->senderState->order*/) {
489 // The only way this happens is if we are
490 // doing a write and we didn't have permissions
491 // then subsequently saw a writeback(owned got evicted)
492 // We need to make sure to perform the writeback first
493 // To preserve the dirty data, then we can issue the write
497 else if (!mq
.isFull()){
498 //If we have a miss queue slot, we can try a prefetch
499 pkt
= prefetcher
->getPacket();
501 //Update statistic on number of prefetches issued (hwpf_mshr_misses)
502 mshr_misses
[pkt
->cmdToIndex()][pkt
->req
->getThreadNum()]++;
503 //It will request the bus for the future, but should clear that immedieatley
504 allocateMiss(pkt
, pkt
->getSize(), curTick
);
506 assert(pkt
); //We should get back a req b/c we just put one in
513 MissQueue::setBusCmd(Packet
* &pkt
, Packet::Command cmd
)
515 assert(pkt
->senderState
!= 0);
516 MSHR
* mshr
= (MSHR
*)pkt
->senderState
;
517 mshr
->originalCmd
= pkt
->cmd
;
518 if (pkt
->isCacheFill() || pkt
->isNoAllocate())
523 MissQueue::restoreOrigCmd(Packet
* &pkt
)
525 pkt
->cmd
= ((MSHR
*)(pkt
->senderState
))->originalCmd
;
529 MissQueue::markInService(Packet
* &pkt
)
531 assert(pkt
->senderState
!= 0);
532 bool unblock
= false;
533 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
536 * @todo Should include MSHRQueue pointer in MSHR to select the correct
539 if ((!pkt
->isCacheFill() && pkt
->isWrite())) {
540 // Forwarding a write/ writeback, don't need to change
542 unblock
= wb
.isFull();
543 wb
.markInService((MSHR
*)pkt
->senderState
);
544 if (!wb
.havePending()){
545 cache
->clearMasterRequest(Request_WB
);
548 // Do we really unblock?
549 unblock
= !wb
.isFull();
550 cause
= Blocked_NoWBBuffers
;
553 unblock
= mq
.isFull();
554 mq
.markInService((MSHR
*)pkt
->senderState
);
555 if (!mq
.havePending()){
556 cache
->clearMasterRequest(Request_MSHR
);
558 if (((MSHR
*)(pkt
->senderState
))->originalCmd
== Packet::HardPFReq
) {
559 DPRINTF(HWPrefetch
, "%s:Marking a HW_PF in service\n",
561 //Also clear pending if need be
562 if (!prefetcher
->havePending())
564 cache
->clearMasterRequest(Request_PF
);
568 unblock
= !mq
.isFull();
569 cause
= Blocked_NoMSHRs
;
573 cache
->clearBlocked(cause
);
579 MissQueue::handleResponse(Packet
* &pkt
, Tick time
)
581 MSHR
* mshr
= (MSHR
*)pkt
->senderState
;
582 if (((MSHR
*)(pkt
->senderState
))->originalCmd
== Packet::HardPFReq
) {
583 DPRINTF(HWPrefetch
, "%s:Handling the response to a HW_PF\n",
587 int num_targets
= mshr
->getNumTargets();
590 bool unblock
= false;
591 bool unblock_target
= false;
592 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
594 if (pkt
->isCacheFill() && !pkt
->isNoAllocate()) {
595 mshr_miss_latency
[mshr
->originalCmd
][pkt
->req
->getThreadNum()] +=
597 // targets were handled in the cache tags
598 if (mshr
== noTargetMSHR
) {
599 // we always clear at least one target
600 unblock_target
= true;
601 cause
= Blocked_NoTargets
;
605 if (mshr
->hasTargets()) {
606 // Didn't satisfy all the targets, need to resend
607 Packet::Command cmd
= mshr
->getTarget()->cmd
;
608 mq
.markPending(mshr
, cmd
);
609 mshr
->order
= order
++;
610 cache
->setMasterRequest(Request_MSHR
, time
);
613 unblock
= mq
.isFull();
616 unblock
= !mq
.isFull();
617 cause
= Blocked_NoMSHRs
;
621 if (pkt
->req
->isUncacheable()) {
622 mshr_uncacheable_lat
[pkt
->cmd
][pkt
->req
->getThreadNum()] +=
625 if (mshr
->hasTargets() && pkt
->req
->isUncacheable()) {
626 // Should only have 1 target if we had any
627 assert(num_targets
== 1);
628 Packet
* target
= mshr
->getTarget();
631 memcpy(target
->getPtr
<uint8_t>(), pkt
->getPtr
<uint8_t>(),
634 cache
->respond(target
, time
);
635 assert(!mshr
->hasTargets());
637 else if (mshr
->hasTargets()) {
638 //Must be a no_allocate with possibly more than one target
639 assert(mshr
->pkt
->isNoAllocate());
640 while (mshr
->hasTargets()) {
641 Packet
* target
= mshr
->getTarget();
644 memcpy(target
->getPtr
<uint8_t>(), pkt
->getPtr
<uint8_t>(),
647 cache
->respond(target
, time
);
651 if (pkt
->isWrite()) {
652 // If the wrtie buffer is full, we might unblock now
653 unblock
= wb
.isFull();
656 // Did we really unblock?
657 unblock
= !wb
.isFull();
658 cause
= Blocked_NoWBBuffers
;
661 unblock
= mq
.isFull();
664 unblock
= !mq
.isFull();
665 cause
= Blocked_NoMSHRs
;
669 if (unblock
|| unblock_target
) {
670 cache
->clearBlocked(cause
);
675 MissQueue::squash(int threadNum
)
677 bool unblock
= false;
678 BlockedCause cause
= NUM_BLOCKED_CAUSES
;
680 if (noTargetMSHR
&& noTargetMSHR
->threadNum
== threadNum
) {
683 cause
= Blocked_NoTargets
;
687 cause
= Blocked_NoMSHRs
;
689 mq
.squash(threadNum
);
690 if (!mq
.havePending()) {
691 cache
->clearMasterRequest(Request_MSHR
);
693 if (unblock
&& !mq
.isFull()) {
694 cache
->clearBlocked(cause
);
700 MissQueue::findMSHR(Addr addr
) const
702 return mq
.findMatch(addr
);
706 MissQueue::findWrites(Addr addr
, vector
<MSHR
*> &writes
) const
708 return wb
.findMatches(addr
,writes
);
712 MissQueue::doWriteback(Addr addr
,
713 int size
, uint8_t *data
, bool compressed
)
716 Request
* req
= new Request(addr
, size
, 0);
717 Packet
* pkt
= new Packet(req
, Packet::Writeback
, -1);
720 memcpy(pkt
->getPtr
<uint8_t>(), data
, size
);
724 pkt
->flags
|= COMPRESSED
;
727 ///All writebacks charged to same thread @todo figure this out
728 writebacks
[pkt
->req
->getThreadNum()]++;
730 allocateWrite(pkt
, 0, curTick
);
735 MissQueue::doWriteback(Packet
* &pkt
)
737 writebacks
[pkt
->req
->getThreadNum()]++;
738 allocateWrite(pkt
, 0, curTick
);
743 MissQueue::allocateTargetList(Addr addr
)
745 MSHR
* mshr
= mq
.allocateTargetList(addr
, blkSize
);
746 mshr
->pkt
->flags
|= CACHE_LINE_FILL
;
748 cache
->setBlocked(Blocked_NoMSHRs
);
754 MissQueue::havePending()
756 return mq
.havePending() || wb
.havePending() || prefetcher
->havePending();