Was having difficulty with merging the cache, reverted to an early version and will...
[gem5.git] / src / mem / cache / cache_impl.hh
1 /*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 * Dave Greene
30 * Nathan Binkert
31 */
32
33 /**
34 * @file
35 * Cache definitions.
36 */
37
#include <assert.h>
#include <math.h>

#include <cassert>
#include <cstring>
#include <iostream>
#include <string>

#include "sim/host.hh"
#include "base/misc.hh"
#include "cpu/smt.hh"

#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh"

#include "mem/bus/bus.hh"

#include "mem/bus/slave_interface.hh"
#include "mem/memory_interface.hh"
#include "mem/bus/master_interface.hh"

#include "mem/mem_debug.hh"

#include "sim/sim_events.hh" // for SimExitEvent
63
64 using namespace std;
65
66 template<class TagStore, class Buffering, class Coherence>
67 bool
68 Cache<TagStore,Buffering,Coherence>::
69 doTimingAccess(Packet *pkt, MemoryPort *memoryPort, bool isCpuSide)
70 {
71 if (isCpuSide)
72 {
73 access(pkt);
74 }
75 else
76 {
77 if (pkt->isRespnse())
78 handleResponse(pkt);
79 else
80 snoop(pkt);
81 }
82 }
83
84 template<class TagStore, class Buffering, class Coherence>
85 Tick
86 Cache<TagStore,Buffering,Coherence>::
87 doAtomicAccess(Packet *pkt, MemoryPort *memoryPort, bool isCpuSide)
88 {
89 if (isCpuSide)
90 {
91 probe(pkt, true);
92 }
93 else
94 {
95 if (pkt->isRespnse())
96 handleResponse(pkt);
97 else
98 snoopProbe(pkt, true);
99 }
100 }
101
102 template<class TagStore, class Buffering, class Coherence>
103 void
104 Cache<TagStore,Buffering,Coherence>::
105 doFunctionalAccess(Packet *pkt, MemoryPort *memoryPort, bool isCpuSide)
106 {
107 if (isCpuSide)
108 {
109 probe(pkt, false);
110 }
111 else
112 {
113 if (pkt->isRespnse())
114 handleResponse(pkt);
115 else
116 snoopProbe(pkt, false);
117 }
118 }
119
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{
    // Intentionally empty: this cache currently ignores port status
    // changes from both the CPU side and the memory side.
}
127
128
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
Cache(const std::string &_name, HierParams *hier_params,
      Cache<TagStore,Buffering,Coherence>::Params &params)
    : BaseCache(_name, hier_params, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags), missQueue(params.missQueue),
      coherence(params.coherence), prefetcher(params.prefetcher),
      doCopy(params.doCopy), blockOnCopy(params.blockOnCopy)
{
    // No incoming (CPU-side) interface means this cache sits at the top
    // of the hierarchy.
    if (params.in == NULL) {
        topLevelCache = true;
    }
    // Wire each sub-component back to this cache and to one another.
    tags->setCache(this, params.out->width, params.out->clockRate);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);
    coherence->setCache(this);
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
    // Pre-built invalidate packet reused by snoop() to append invalidate
    // targets to in-service MSHRs; lives for the lifetime of the cache.
    invalidatePkt = new Packet;
    invalidatePkt->cmd = Invalidate;
}
153
154 template<class TagStore, class Buffering, class Coherence>
155 void
156 Cache<TagStore,Buffering,Coherence>::regStats()
157 {
158 BaseCache::regStats();
159 tags->regStats(name());
160 missQueue->regStats(name());
161 coherence->regStats(name());
162 prefetcher->regStats(name());
163 }
164
165 template<class TagStore, class Buffering, class Coherence>
166 MemAccessResult
167 Cache<TagStore,Buffering,Coherence>::access(Packet &pkt)
168 {
169 MemDebug::cacheAccess(pkt);
170 BlkType *blk = NULL;
171 PacketList* writebacks;
172 int size = blkSize;
173 int lat = hitLatency;
174 if (prefetchAccess) {
175 //We are determining prefetches on access stream, call prefetcher
176 prefetcher->handleMiss(pkt, curTick);
177 }
178 if (!pkt->isUncacheable()) {
179 if (pkt->cmd.isInvalidate() && !pkt->cmd.isRead()
180 && !pkt->cmd.isWrite()) {
181 //Upgrade or Invalidate
182 //Look into what happens if two slave caches on bus
183 DPRINTF(Cache, "%s %d %x ? blk_addr: %x\n", pkt->cmd.toString(),
184 pkt->req->asid, pkt->paddr & (((ULL(1))<<48)-1),
185 pkt->paddr & ~((Addr)blkSize - 1));
186
187 //@todo Should this return latency have the hit latency in it?
188 // respond(pkt,curTick+lat);
189 pkt->flags |= SATISFIED;
190 return MA_HIT;
191 }
192 blk = tags->handleAccess(pkt, lat, writebacks);
193 } else {
194 size = pkt->size;
195 }
196 // If this is a block size write/hint (WH64) allocate the block here
197 // if the coherence protocol allows it.
198 /** @todo make the fast write alloc (wh64) work with coherence. */
199 /** @todo Do we want to do fast writes for writebacks as well? */
200 if (!blk && pkt->size >= blkSize && coherence->allowFastWrites() &&
201 (pkt->cmd == Write || pkt->cmd == WriteInvalidate) ) {
202 // not outstanding misses, can do this
203 MSHR* outstanding_miss = missQueue->findMSHR(pkt->paddr, pkt->req->asid);
204 if (pkt->cmd ==WriteInvalidate || !outstanding_miss) {
205 if (outstanding_miss) {
206 warn("WriteInv doing a fastallocate"
207 "with an outstanding miss to the same address\n");
208 }
209 blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
210 writebacks);
211 ++fastWrites;
212 }
213 }
214 while (!writebacks.empty()) {
215 missQueue->doWriteback(writebacks.front());
216 writebacks.pop_front();
217 }
218 DPRINTF(Cache, "%s %d %x %s blk_addr: %x pc %x\n", pkt->cmd.toString(),
219 pkt->req->asid, pkt->paddr & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
220 pkt->paddr & ~((Addr)blkSize - 1), pkt->pc);
221 if (blk) {
222 // Hit
223 hits[pkt->cmd.toIndex()][pkt->thread_num]++;
224 // clear dirty bit if write through
225 if (!pkt->cmd.isNoResponse())
226 respond(pkt, curTick+lat);
227 return MA_HIT;
228 }
229
230 // Miss
231 if (!pkt->isUncacheable()) {
232 misses[pkt->cmd.toIndex()][pkt->thread_num]++;
233 /** @todo Move miss count code into BaseCache */
234 if (missCount) {
235 --missCount;
236 if (missCount == 0)
237 new SimExitEvent("A cache reached the maximum miss count");
238 }
239 }
240 missQueue->handleMiss(pkt, size, curTick + hitLatency);
241 return MA_CACHE_MISS;
242 }
243
244
245 template<class TagStore, class Buffering, class Coherence>
246 Packet *
247 Cache<TagStore,Buffering,Coherence>::getPacket()
248 {
249 Packet * pkt = missQueue->getPacket();
250 if (pkt) {
251 if (!pkt->isUncacheable()) {
252 if (pkt->cmd == Hard_Prefetch) misses[Hard_Prefetch][pkt->thread_num]++;
253 BlkType *blk = tags->findBlock(pkt);
254 Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
255 (blk)? blk->status : 0);
256 missQueue->setBusCmd(pkt, cmd);
257 }
258 }
259
260 assert(!doMasterPktuest() || missQueue->havePending());
261 assert(!pkt || pkt->time <= curTick);
262 return pkt;
263 }
264
265 template<class TagStore, class Buffering, class Coherence>
266 void
267 Cache<TagStore,Buffering,Coherence>::sendResult(MemPktPtr &pkt, bool success)
268 {
269 if (success) {
270 missQueue->markInService(pkt);
271 //Temp Hack for UPGRADES
272 if (pkt->cmd == Upgrade) {
273 handleResponse(pkt);
274 }
275 } else if (pkt && !pkt->isUncacheable()) {
276 missQueue->restoreOrigCmd(pkt);
277 }
278 }
279
280 template<class TagStore, class Buffering, class Coherence>
281 void
282 Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
283 {
284 BlkType *blk = NULL;
285 if (pkt->senderState) {
286 MemDebug::cacheResponse(pkt);
287 DPRINTF(Cache, "Handling reponse to %x, blk addr: %x\n",pkt->paddr,
288 pkt->paddr & (((ULL(1))<<48)-1));
289
290 if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
291 blk = tags->findBlock(pkt);
292 CacheBlk::State old_state = (blk) ? blk->status : 0;
293 MemPktList writebacks;
294 blk = tags->handleFill(blk, pkt->senderState,
295 coherence->getNewState(pkt,old_state),
296 writebacks);
297 while (!writebacks.empty()) {
298 missQueue->doWriteback(writebacks.front());
299 }
300 }
301 missQueue->handleResponse(pkt, curTick + hitLatency);
302 }
303 }
304
305 template<class TagStore, class Buffering, class Coherence>
306 void
307 Cache<TagStore,Buffering,Coherence>::pseudoFill(Addr addr, int asid)
308 {
309 // Need to temporarily move this blk into MSHRs
310 MSHR *mshr = missQueue->allocateTargetList(addr, asid);
311 int lat;
312 PacketList* dummy;
313 // Read the data into the mshr
314 BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
315 assert(dummy.empty());
316 assert(mshr->pkt->isSatisfied());
317 // can overload order since it isn't used on non pending blocks
318 mshr->order = blk->status;
319 // temporarily remove the block from the cache.
320 tags->invalidateBlk(addr, asid);
321 }
322
323 template<class TagStore, class Buffering, class Coherence>
324 void
325 Cache<TagStore,Buffering,Coherence>::pseudoFill(MSHR *mshr)
326 {
327 // Need to temporarily move this blk into MSHRs
328 assert(mshr->pkt->cmd == Read);
329 int lat;
330 PacketList* dummy;
331 // Read the data into the mshr
332 BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
333 assert(dummy.empty());
334 assert(mshr->pkt->isSatisfied());
335 // can overload order since it isn't used on non pending blocks
336 mshr->order = blk->status;
337 // temporarily remove the block from the cache.
338 tags->invalidateBlk(mshr->pkt->paddr, mshr->pkt->req->asid);
339 }
340
341
342 template<class TagStore, class Buffering, class Coherence>
343 Packet *
344 Cache<TagStore,Buffering,Coherence>::getCoherenceReq()
345 {
346 return coherence->getPacket();
347 }
348
349
350 template<class TagStore, class Buffering, class Coherence>
351 void
352 Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
353 {
354 Addr blk_addr = pkt->paddr & ~(Addr(blkSize-1));
355 BlkType *blk = tags->findBlock(pkt);
356 MSHR *mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
357 if (isTopLevel() && coherence->hasProtocol()) { //@todo Move this into handle bus req
358 //If we find an mshr, and it is in service, we need to NACK or invalidate
359 if (mshr) {
360 if (mshr->inService) {
361 if ((mshr->pkt->cmd.isInvalidate() || !mshr->pkt->isCacheFill())
362 && (pkt->cmd != Invalidate && pkt->cmd != WriteInvalidate)) {
363 //If the outstanding request was an invalidate (upgrade,readex,..)
364 //Then we need to ACK the request until we get the data
365 //Also NACK if the outstanding request is not a cachefill (writeback)
366 pkt->flags |= NACKED_LINE;
367 return;
368 }
369 else {
370 //The supplier will be someone else, because we are waiting for
371 //the data. This should cause this cache to be forced to go to
372 //the shared state, not the exclusive even though the shared line
373 //won't be asserted. But for now we will just invlidate ourselves
374 //and allow the other cache to go into the exclusive state.
375 //@todo Make it so a read to a pending read doesn't invalidate.
376 //@todo Make it so that a read to a pending read can't be exclusive now.
377
378 //Set the address so find match works
379 invalidatePkt->paddr = pkt->paddr;
380
381 //Append the invalidate on
382 missQueue->addTarget(mshr,invalidatePkt);
383 DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n", pkt->paddr & (((ULL(1))<<48)-1));
384 return;
385 }
386 }
387 }
388 //We also need to check the writeback buffers and handle those
389 std::vector<MSHR *> writebacks;
390 if (missQueue->findWrites(blk_addr, pkt->req->asid, writebacks)) {
391 DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n", pkt->paddr & (((ULL(1))<<48)-1));
392
393 //Look through writebacks for any non-uncachable writes, use that
394 for (int i=0; i<writebacks.size(); i++) {
395 mshr = writebacks[i];
396
397 if (!mshr->pkt->isUncacheable()) {
398 if (pkt->cmd.isRead()) {
399 //Only Upgrades don't get here
400 //Supply the data
401 pkt->flags |= SATISFIED;
402
403 //If we are in an exclusive protocol, make it ask again
404 //to get write permissions (upgrade), signal shared
405 pkt->flags |= SHARED_LINE;
406
407 if (doData()) {
408 assert(pkt->cmd.isRead());
409
410 assert(pkt->offset < blkSize);
411 assert(pkt->size <= blkSize);
412 assert(pkt->offset + pkt->size <=blkSize);
413 memcpy(pkt->data, mshr->pkt->data + pkt->offset, pkt->size);
414 }
415 respondToSnoop(pkt);
416 }
417
418 if (pkt->cmd.isInvalidate()) {
419 //This must be an upgrade or other cache will take ownership
420 missQueue->markInService(mshr->pkt);
421 }
422 return;
423 }
424 }
425 }
426 }
427 CacheBlk::State new_state;
428 bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
429 if (satisfy) {
430 tags->handleSnoop(blk, new_state, pkt);
431 respondToSnoop(pkt);
432 return;
433 }
434 tags->handleSnoop(blk, new_state);
435 }
436
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
{
    // Handle the response to one of our own requests observed on the
    // snoop path. Currently only the NACK case is acknowledged, and even
    // that deliberately aborts: NACKs are handled elsewhere (see below).
    if (pkt->isNacked()) {
        //Need to mark it as not in service, and retry for bus
        assert(0); //Yeah, we saw a NACK come through

        //For now this should never get called, we return false when we see a NACK
        //instead, by doing this we allow the bus_blocked mechanism to handle the retry
        //For now it retries in just 2 cycles, need to figure out how to change that
        //Eventually we will want to also have success come in as a parameter
        //Need to make sure that we handle the functionality that happens on successful
        //return of the sendAddr function
    }
}
454
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr, int asid)
{
    // Thin forwarder: invalidate the block (if present) in the tag store.
    tags->invalidateBlk(addr,asid);
}
461
462
463 /**
464 * @todo Fix to not assume write allocate
465 */
466 template<class TagStore, class Buffering, class Coherence>
467 Tick
468 Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
469 {
470 MemDebug::cacheProbe(pkt);
471
472 if (!pkt->isUncacheable()) {
473 if (pkt->cmd.isInvalidate() && !pkt->cmd.isRead()
474 && !pkt->cmd.isWrite()) {
475 //Upgrade or Invalidate, satisfy it, don't forward
476 DPRINTF(Cache, "%s %d %x ? blk_addr: %x\n", pkt->cmd.toString(),
477 pkt->req->asid, pkt->paddr & (((ULL(1))<<48)-1),
478 pkt->paddr & ~((Addr)blkSize - 1));
479 pkt->flags |= SATISFIED;
480 return 0;
481 }
482 }
483
484 if (!update && !doData()) {
485 // Nothing to do here
486 return mi->sendProbe(pkt,update);
487 }
488
489 PacketList* writebacks;
490 int lat;
491 BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);
492
493 if (!blk) {
494 // Need to check for outstanding misses and writes
495 Addr blk_addr = pkt->paddr & ~(blkSize - 1);
496
497 // There can only be one matching outstanding miss.
498 MSHR* mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
499
500 // There can be many matching outstanding writes.
501 vector<MSHR*> writes;
502 missQueue->findWrites(blk_addr, pkt->req->asid, writes);
503
504 if (!update) {
505 mi->sendProbe(pkt, update);
506 // Check for data in MSHR and writebuffer.
507 if (mshr) {
508 warn("Found outstanding miss on an non-update probe");
509 MSHR::TargetList *targets = mshr->getTargetList();
510 MSHR::TargetList::iterator i = targets->begin();
511 MSHR::TargetList::iterator end = targets->end();
512 for (; i != end; ++i) {
513 Packet * target = *i;
514 // If the target contains data, and it overlaps the
515 // probed request, need to update data
516 if (target->cmd.isWrite() && target->overlaps(pkt)) {
517 uint8_t* pkt_data;
518 uint8_t* write_data;
519 int data_size;
520 if (target->paddr < pkt->paddr) {
521 int offset = pkt->paddr - target->paddr;
522 pkt_data = pkt->data;
523 write_data = target->data + offset;
524 data_size = target->size - offset;
525 assert(data_size > 0);
526 if (data_size > pkt->size)
527 data_size = pkt->size;
528 } else {
529 int offset = target->paddr - pkt->paddr;
530 pkt_data = pkt->data + offset;
531 write_data = target->data;
532 data_size = pkt->size - offset;
533 assert(data_size > pkt->size);
534 if (data_size > target->size)
535 data_size = target->size;
536 }
537
538 if (pkt->cmd.isWrite()) {
539 memcpy(pkt_data, write_data, data_size);
540 } else {
541 memcpy(write_data, pkt_data, data_size);
542 }
543 }
544 }
545 }
546 for (int i = 0; i < writes.size(); ++i) {
547 Packet * write = writes[i]->pkt;
548 if (write->overlaps(pkt)) {
549 warn("Found outstanding write on an non-update probe");
550 uint8_t* pkt_data;
551 uint8_t* write_data;
552 int data_size;
553 if (write->paddr < pkt->paddr) {
554 int offset = pkt->paddr - write->paddr;
555 pkt_data = pkt->data;
556 write_data = write->data + offset;
557 data_size = write->size - offset;
558 assert(data_size > 0);
559 if (data_size > pkt->size)
560 data_size = pkt->size;
561 } else {
562 int offset = write->paddr - pkt->paddr;
563 pkt_data = pkt->data + offset;
564 write_data = write->data;
565 data_size = pkt->size - offset;
566 assert(data_size > pkt->size);
567 if (data_size > write->size)
568 data_size = write->size;
569 }
570
571 if (pkt->cmd.isWrite()) {
572 memcpy(pkt_data, write_data, data_size);
573 } else {
574 memcpy(write_data, pkt_data, data_size);
575 }
576
577 }
578 }
579 return 0;
580 } else {
581 // update the cache state and statistics
582 if (mshr || !writes.empty()){
583 // Can't handle it, return pktuest unsatisfied.
584 return 0;
585 }
586 if (!pkt->isUncacheable()) {
587 // Fetch the cache block to fill
588 Packet * busPkt = new MemPkt();
589 busPkt->paddr = blk_addr;
590 busPkt->size = blkSize;
591 busPkt->data = new uint8_t[blkSize];
592
593 BlkType *blk = tags->findBlock(pkt);
594 busPkt->cmd = coherence->getBusCmd(pkt->cmd,
595 (blk)? blk->status : 0);
596
597 busPkt->req->asid = pkt->req->asid;
598 busPkt->xc = pkt->xc;
599 busPkt->thread_num = pkt->thread_num;
600 busPkt->time = curTick;
601
602 lat = mi->sendProbe(busPkt, update);
603
604 if (!busPkt->isSatisfied()) {
605 // blocked at a higher level, just return
606 return 0;
607 }
608
609 misses[pkt->cmd.toIndex()][pkt->thread_num]++;
610
611 CacheBlk::State old_state = (blk) ? blk->status : 0;
612 tags->handleFill(blk, busPkt,
613 coherence->getNewState(busPkt, old_state),
614 writebacks, pkt);
615 // Handle writebacks if needed
616 while (!writebacks.empty()){
617 mi->sendProbe(writebacks.front(), update);
618 writebacks.pop_front();
619 }
620 return lat + hitLatency;
621 } else {
622 return mi->sendProbe(pkt,update);
623 }
624 }
625 } else {
626 // There was a cache hit.
627 // Handle writebacks if needed
628 while (!writebacks.empty()){
629 mi->sendProbe(writebacks.front(), update);
630 writebacks.pop_front();
631 }
632
633 if (update) {
634 hits[pkt->cmd.toIndex()][pkt->thread_num]++;
635 } else if (pkt->cmd.isWrite()) {
636 // Still need to change data in all locations.
637 return mi->sendProbe(pkt, update);
638 }
639 return curTick + lat;
640 }
641 fatal("Probe not handled.\n");
642 return 0;
643 }
644
645 template<class TagStore, class Buffering, class Coherence>
646 Tick
647 Cache<TagStore,Buffering,Coherence>::snoopProbe(MemPktPtr &pkt, bool update)
648 {
649 Addr blk_addr = pkt->paddr & ~(Addr(blkSize-1));
650 BlkType *blk = tags->findBlock(pkt);
651 MSHR *mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
652 CacheBlk::State new_state = 0;
653 bool satisfy = coherence->handleBusPktuest(pkt,blk,mshr, new_state);
654 if (satisfy) {
655 tags->handleSnoop(blk, new_state, pkt);
656 return hitLatency;
657 }
658 tags->handleSnoop(blk, new_state);
659 return 0;
660 }
661