/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 */

/**
 * @file
 * Cache definitions.
 */

#include <cassert>
#include <cmath>
#include <iostream>
#include <string>

#include "sim/host.hh"
#include "base/misc.hh"
#include "cpu/smt.hh"

#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh"

#include "sim/sim_events.hh" // for SimLoopExitEvent

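/**
 * Handle a timing-mode access from either side of the cache. CPU-side
 * requests go through access(); memory-side packets are dispatched as
 * responses or snoops. The LOCKED check is a temporary stand-in for
 * real LL/SC handling.
 * @param pkt The packet to service.
 * @param cachePort The port the packet arrived on.
 * @param isCpuSide True if the packet came from the CPU side.
 * @return Always true; the access is always accepted.
 */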
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::
doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
{
    if (isCpuSide)
    {
        if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
            pkt->req->setScResult(1);
        }
        access(pkt);
    }
    else
    {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            snoop(pkt);
    }
    return true;
}

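/**
 * Handle an atomic-mode access. CPU-side requests probe the cache and
 * are marked successful unconditionally for now; memory-side packets
 * are dispatched as responses or snoop probes.
 * @return The estimated completion latency; currently just the hit
 * latency (see the @todo below).
 */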
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::
doAtomicAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide)
    {
        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
            pkt->req->setScResult(1);
        }

        probe(pkt, true);
        //TEMP: ALWAYS SUCCESS FOR NOW
        pkt->result = Packet::Success;
    }
    else
    {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            snoopProbe(pkt, true);
    }
    //@todo Fix this timing info
    return hitLatency;
}

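/**
 * Handle a functional (debug) access. LL/SC is not supported on this
 * path; other CPU-side requests are probed into the cache and reported
 * successful unconditionally for now.
 */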
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
doFunctionalAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide)
    {
        //TEMP: use CPU 0, thread 0 for now
        pkt->req->setThreadContext(0,0);

        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
            panic("Can't handle LL/SC on functional path\n");
        }

        probe(pkt, true);
        //TEMP: ALWAYS SUCCESSFUL FOR NOW
        pkt->result = Packet::Success;
    }
    else
    {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            snoopProbe(pkt, true);
    }
}

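/**
 * Receive notification of a status change on one of the cache's ports.
 * Currently a no-op.
 */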
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{
}

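/**
 * Construct the cache and wire up its helper objects (tags, miss
 * queue, coherence protocol, and prefetcher) so they can call back
 * into the cache. Note that the invalidatePkt used by snoop() is
 * currently compiled out (#if 0), and the bus width/clock parameters
 * passed to the tags are placeholders.
 */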
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
Cache(const std::string &_name,
      Cache<TagStore,Buffering,Coherence>::Params &params)
    : BaseCache(_name, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags), missQueue(params.missQueue),
      coherence(params.coherence), prefetcher(params.prefetcher),
      doCopy(params.doCopy), blockOnCopy(params.blockOnCopy)
{
    //@todo Fix bus pointers
    // if (params.in == NULL) {
    topLevelCache = true;
    // }
    //@todo Bus sizes are not being used; fix this
    tags->setCache(this, blkSize, 1/*params.out->width, params.out->clockRate*/);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);
    coherence->setCache(this);
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
#if 0
    invalidatePkt = new Packet;
    invalidatePkt->cmd = Packet::InvalidateReq;
#endif
}

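/**
 * Register statistics for the cache and each of its helper objects.
 */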
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    missQueue->regStats(name());
    coherence->regStats(name());
    prefetcher->regStats(name());
}

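/**
 * Service an access from the CPU side. Prefetch-on-access notifies the
 * prefetcher first; stand-alone invalidates and upgrades are satisfied
 * immediately; otherwise the tags are consulted. A whole-block write
 * (e.g., WH64) may allocate a block directly if the coherence protocol
 * allows fast writes. Hits respond after the hit latency; misses are
 * handed to the miss queue. Writebacks generated along the way are
 * queued before the hit/miss is resolved.
 * @return Always true; the access is always consumed.
 */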
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
{
    //@todo Add back in MemDebug Calls
    //    MemDebug::cacheAccess(pkt);
    BlkType *blk = NULL;
    PacketList writebacks;
    int size = blkSize;
    int lat = hitLatency;
    if (prefetchAccess) {
        //We are determining prefetches on access stream, call prefetcher
        prefetcher->handleMiss(pkt, curTick);
    }
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead()
            && !pkt->isWrite()) {
            //Upgrade or Invalidate
            //Look into what happens if two slave caches on bus
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));

            //@todo Should this return latency have the hit latency in it?
            //    respond(pkt, curTick+lat);
            pkt->flags |= SATISFIED;
            //    return MA_HIT; //@todo, return values
            return true;
        }
        blk = tags->handleAccess(pkt, lat, writebacks);
    } else {
        size = pkt->getSize();
    }
    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    /** @todo make the fast write alloc (wh64) work with coherence. */
    /** @todo Do we want to do fast writes for writebacks as well? */
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == Packet::WriteReq || pkt->cmd == Packet::WriteInvalidateReq)) {
        // no outstanding misses, can do this
        MSHR* outstanding_miss = missQueue->findMSHR(pkt->getAddr());
        if (pkt->cmd == Packet::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fast allocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
                                   writebacks);
            ++fastWrites;
        }
    }
    while (!writebacks.empty()) {
        missQueue->doWriteback(writebacks.front());
        writebacks.pop_front();
    }
    DPRINTF(Cache, "%s %x %s blk_addr: %x pc %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1), pkt->req->getPC());
    if (blk) {
        // Hit
        hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
        // clear dirty bit if write through
        if (pkt->needsResponse())
            respond(pkt, curTick+lat);
        //    return MA_HIT;
        return true;
    }

    // Miss
    if (!pkt->req->isUncacheable()) {
        misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
        /** @todo Move miss count code into BaseCache */
        if (missCount) {
            --missCount;
            if (missCount == 0)
                new SimLoopExitEvent(curTick,
                                     "A cache reached the maximum miss count");
        }
    }
    missQueue->handleMiss(pkt, size, curTick + hitLatency);
    //    return MA_CACHE_MISS;
    return true;
}

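/**
 * Get the next request packet to send on the memory-side bus. For
 * cacheable requests the coherence protocol chooses the actual bus
 * command based on the current block state.
 * @return The next packet to send, or NULL if none is ready.
 */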
template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getPacket()
{
    Packet * pkt = missQueue->getPacket();
    if (pkt) {
        if (!pkt->req->isUncacheable()) {
            if (pkt->cmd == Packet::HardPFReq)
                misses[Packet::HardPFReq][pkt->req->getThreadNum()]++;
            BlkType *blk = tags->findBlock(pkt);
            Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
                                                       (blk) ? blk->status : 0);
            missQueue->setBusCmd(pkt, cmd);
        }
    }

    assert(!doMasterRequest() || missQueue->havePending());
    assert(!pkt || pkt->time <= curTick);
    return pkt;
}

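/**
 * Notification of whether a request was successfully sent on the bus.
 * Successful sends are marked in service; upgrades are completed
 * immediately as a temporary hack. Failed cacheable sends have their
 * original command restored so they can be retried.
 */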
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, bool success)
{
    if (success) {
        missQueue->markInService(pkt);
        //Temp Hack for UPGRADES
        if (pkt->cmd == Packet::UpgradeReq) {
            handleResponse(pkt);
        }
    } else if (pkt && !pkt->req->isUncacheable()) {
        missQueue->restoreOrigCmd(pkt);
    }
}

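/**
 * Handle a response from the memory side. Fill responses update the
 * tags (possibly generating writebacks) using the new block state
 * supplied by the coherence protocol, then the miss queue is notified.
 */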
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
{
    BlkType *blk = NULL;
    if (pkt->senderState) {
        //    MemDebug::cacheResponse(pkt);
        DPRINTF(Cache, "Handling response to %x, blk addr: %x\n", pkt->getAddr(),
                pkt->getAddr() & (((ULL(1))<<48)-1));

        if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
            blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            PacketList writebacks;
            blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
                                   coherence->getNewState(pkt, old_state),
                                   writebacks);
            while (!writebacks.empty()) {
                missQueue->doWriteback(writebacks.front());
                writebacks.pop_front();
            }
        }
        missQueue->handleResponse(pkt, curTick + hitLatency);
    }
}

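/**
 * Temporarily move the block at addr out of the cache and into an MSHR
 * target list, stashing its state in the MSHR's order field.
 */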
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::pseudoFill(Addr addr)
{
    // Need to temporarily move this blk into MSHRs
    MSHR *mshr = missQueue->allocateTargetList(addr);
    int lat;
    PacketList dummy;
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // can overload order since it isn't used on non pending blocks
    mshr->order = blk->status;
    // temporarily remove the block from the cache.
    tags->invalidateBlk(addr);
}

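/**
 * As above, but fill from an existing MSHR whose packet identifies the
 * block to move.
 */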
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::pseudoFill(MSHR *mshr)
{
    // Need to temporarily move this blk into MSHRs
    assert(mshr->pkt->cmd == Packet::ReadReq);
    int lat;
    PacketList dummy;
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // can overload order since it isn't used on non pending blocks
    mshr->order = blk->status;
    // temporarily remove the block from the cache.
    tags->invalidateBlk(mshr->pkt->getAddr());
}

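/**
 * Get the next coherence-protocol request, if any.
 */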
template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
{
    return coherence->getPacket();
}

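/**
 * Handle a snooped request from the bus. At the top level of a
 * coherent hierarchy, snoops that conflict with an in-service MSHR are
 * either NACKed (when we are waiting on an invalidate or a
 * non-cache-fill) or recorded as a pending invalidate; snoop hits in
 * the writeback buffer supply data directly. Otherwise the coherence
 * protocol decides whether we must supply data and what state the
 * block transitions to.
 */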
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    if (isTopLevel() && coherence->hasProtocol()) { //@todo Move this into handle bus req
        //If we find an mshr, and it is in service, we need to NACK or invalidate
        if (mshr) {
            if (mshr->inService) {
                if ((mshr->pkt->isInvalidate() || !mshr->pkt->isCacheFill())
                    && (pkt->cmd != Packet::InvalidateReq
                        && pkt->cmd != Packet::WriteInvalidateReq)) {
                    //If the outstanding request was an invalidate (upgrade,readex,..)
                    //then we need to NACK the request until we get the data.
                    //Also NACK if the outstanding request is not a cachefill (writeback)
                    pkt->flags |= NACKED_LINE;
                    return;
                }
                else {
                    //The supplier will be someone else, because we are waiting for
                    //the data. This should cause this cache to be forced to go to
                    //the shared state, not the exclusive even though the shared line
                    //won't be asserted. But for now we will just invalidate ourselves
                    //and allow the other cache to go into the exclusive state.
                    //@todo Make it so a read to a pending read doesn't invalidate.
                    //@todo Make it so that a read to a pending read can't be exclusive now.

                    //Set the address so find match works
                    invalidatePkt->addrOverride(pkt->getAddr());

                    //Append the invalidate on
                    missQueue->addTarget(mshr, invalidatePkt);
                    DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n",
                            pkt->getAddr() & (((ULL(1))<<48)-1));
                    return;
                }
            }
        }
        //We also need to check the writeback buffers and handle those
        std::vector<MSHR *> writebacks;
        if (missQueue->findWrites(blk_addr, writebacks)) {
            DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1));

            //Look through writebacks for any non-uncacheable writes, use that
            for (int i = 0; i < writebacks.size(); i++) {
                mshr = writebacks[i];

                if (!mshr->pkt->req->isUncacheable()) {
                    if (pkt->isRead()) {
                        //Only Upgrades don't get here
                        //Supply the data
                        pkt->flags |= SATISFIED;

                        //If we are in an exclusive protocol, make it ask again
                        //to get write permissions (upgrade), signal shared
                        pkt->flags |= SHARED_LINE;

                        assert(pkt->isRead());
                        Addr offset = pkt->getAddr() & (Addr)(blkSize - 1);
                        assert(offset < blkSize);
                        assert(pkt->getSize() <= blkSize);
                        assert(offset + pkt->getSize() <= blkSize);
                        memcpy(pkt->getPtr<uint8_t>(),
                               mshr->pkt->getPtr<uint8_t>() + offset,
                               pkt->getSize());

                        respondToSnoop(pkt);
                    }

                    if (pkt->isInvalidate()) {
                        //This must be an upgrade or other cache will take ownership
                        missQueue->markInService(mshr->pkt);
                    }
                    return;
                }
            }
        }
    }
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
    if (satisfy) {
        tags->handleSnoop(blk, new_state, pkt);
        respondToSnoop(pkt);
        return;
    }
    tags->handleSnoop(blk, new_state);
}

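/**
 * Handle the response to one of our own snooped requests. A NACKed
 * line would need to be marked not-in-service and retried, but that
 * path is not yet implemented (see the comments below).
 */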
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
{
    //Need to handle the response, if NACKED
    if (pkt->flags & NACKED_LINE) {
        //Need to mark it as not in service, and retry for bus
        assert(0); //Yeah, we saw a NACK come through

        //For now this should never get called; we return false when we
        //see a NACK instead. By doing this we allow the bus_blocked
        //mechanism to handle the retry. For now it retries in just 2
        //cycles; need to figure out how to change that. Eventually we
        //will also want to have success come in as a parameter, and to
        //make sure that we handle the functionality that happens on a
        //successful return of the sendAddr function.
    }
}

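/**
 * Invalidate the block containing addr, if present.
 */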
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
{
    tags->invalidateBlk(addr);
}

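/**
 * Probe the cache for pkt's address. Stand-alone invalidates and
 * upgrades are satisfied locally. On a miss with update set, the block
 * is fetched atomically from the memory side and filled; without
 * update, data is forwarded functionally and patched against any
 * overlapping outstanding misses or writes. On a hit, pending
 * writebacks are flushed and the hit latency is charged.
 * @param pkt The request to probe with.
 * @param update True if cache state and statistics should be updated.
 * @return The estimated completion time of the probe.
 */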
/**
 * @todo Fix to not assume write allocate
 */
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
{
    //    MemDebug::cacheProbe(pkt);
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead()
            && !pkt->isWrite()) {
            //Upgrade or Invalidate, satisfy it, don't forward
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));
            pkt->flags |= SATISFIED;
            return 0;
        }
    }

    PacketList writebacks;
    int lat;
    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);

    if (!blk) {
        // Need to check for outstanding misses and writes
        Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);

        // There can only be one matching outstanding miss.
        MSHR* mshr = missQueue->findMSHR(blk_addr);

        // There can be many matching outstanding writes.
        std::vector<MSHR*> writes;
        missQueue->findWrites(blk_addr, writes);

        if (!update) {
            memSidePort->sendFunctional(pkt);
            // Check for data in MSHR and writebuffer.
            if (mshr) {
                warn("Found outstanding miss on a non-update probe");
                MSHR::TargetList *targets = mshr->getTargetList();
                MSHR::TargetList::iterator i = targets->begin();
                MSHR::TargetList::iterator end = targets->end();
                for (; i != end; ++i) {
                    Packet * target = *i;
                    // If the target contains data, and it overlaps the
                    // probed request, need to update data
                    if (target->isWrite() && target->intersect(pkt)) {
                        uint8_t* pkt_data;
                        uint8_t* write_data;
                        int data_size;
                        if (target->getAddr() < pkt->getAddr()) {
                            int offset = pkt->getAddr() - target->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>();
                            write_data = target->getPtr<uint8_t>() + offset;
                            data_size = target->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > pkt->getSize())
                                data_size = pkt->getSize();
                        } else {
                            int offset = target->getAddr() - pkt->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>() + offset;
                            write_data = target->getPtr<uint8_t>();
                            data_size = pkt->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > target->getSize())
                                data_size = target->getSize();
                        }

                        if (pkt->isWrite()) {
                            memcpy(pkt_data, write_data, data_size);
                        } else {
                            memcpy(write_data, pkt_data, data_size);
                        }
                    }
                }
            }
            for (int i = 0; i < writes.size(); ++i) {
                Packet * write = writes[i]->pkt;
                if (write->intersect(pkt)) {
                    warn("Found outstanding write on a non-update probe");
                    uint8_t* pkt_data;
                    uint8_t* write_data;
                    int data_size;
                    if (write->getAddr() < pkt->getAddr()) {
                        int offset = pkt->getAddr() - write->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>();
                        write_data = write->getPtr<uint8_t>() + offset;
                        data_size = write->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > pkt->getSize())
                            data_size = pkt->getSize();
                    } else {
                        int offset = write->getAddr() - pkt->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>() + offset;
                        write_data = write->getPtr<uint8_t>();
                        data_size = pkt->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > write->getSize())
                            data_size = write->getSize();
                    }

                    if (pkt->isWrite()) {
                        memcpy(pkt_data, write_data, data_size);
                    } else {
                        memcpy(write_data, pkt_data, data_size);
                    }
                }
            }
            return 0;
        } else {
            // update the cache state and statistics
            if (mshr || !writes.empty()) {
                // Can't handle it, return request unsatisfied.
                return 0;
            }
            if (!pkt->req->isUncacheable()) {
                // Fetch the cache block to fill
                BlkType *blk = tags->findBlock(pkt);
                Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd,
                                                                (blk) ? blk->status : 0);

                Packet * busPkt = new Packet(pkt->req, temp_cmd, -1, blkSize);

                busPkt->allocate();

                busPkt->time = curTick;

                lat = memSidePort->sendAtomic(busPkt);

                /*
                if (!(busPkt->flags & SATISFIED)) {
                    // blocked at a higher level, just return
                    return 0;
                }
                */

                misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;

                CacheBlk::State old_state = (blk) ? blk->status : 0;
                tags->handleFill(blk, busPkt,
                                 coherence->getNewState(busPkt, old_state),
                                 writebacks, pkt);
                // Handle writebacks if needed
                while (!writebacks.empty()) {
                    memSidePort->sendAtomic(writebacks.front());
                    writebacks.pop_front();
                }
                return lat + hitLatency;
            } else {
                return memSidePort->sendAtomic(pkt);
            }
        }
    } else {
        // There was a cache hit.
        // Handle writebacks if needed
        while (!writebacks.empty()) {
            memSidePort->sendAtomic(writebacks.front());
            writebacks.pop_front();
        }

        if (update) {
            hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
        } else if (pkt->isWrite()) {
            // Still need to change data in all locations.
            return memSidePort->sendAtomic(pkt);
        }
        return curTick + lat;
    }
    fatal("Probe not handled.\n");
    return 0;
}

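/**
 * Handle a snoop in atomic mode: ask the coherence protocol whether we
 * must supply data, update the block state accordingly, and return the
 * latency incurred (hitLatency if we supplied data, 0 otherwise).
 */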
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt, bool update)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
    if (satisfy) {
        tags->handleSnoop(blk, new_state, pkt);
        return hitLatency;
    }
    tags->handleSnoop(blk, new_state);
    return 0;
}
672