First pass at snooping stuff that compiles and doesn't break.
src/mem/cache/cache_impl.hh
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 */

/**
 * @file
 * Cache definitions.
 */

#include <math.h>

#include <cassert>
#include <iostream>
#include <string>

#include "sim/host.hh"
#include "base/misc.hh"
#include "cpu/smt.hh"

#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh"

#include "sim/sim_events.hh" // for SimExitEvent

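// Timing-mode access dispatch.  CPU-side packets take the normal access()
// path (with the store-conditional result preset for LOCKED writes as a
// temporary LL/SC stand-in); memory-side packets are either responses to
// our own requests or bus snoops.  Snoops arrive in two bus phases: the
// first call here only notes that phase 1 has passed, and the second call
// performs the snoop, and only if the bus committed it (SNOOP_COMMIT).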
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::
doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
{
    if (isCpuSide) {
        if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
            pkt->req->setScResult(1);
        }
        if (!(pkt->flags & SATISFIED)) {
            access(pkt);
        }
    } else {
        if (pkt->isResponse()) {
            handleResponse(pkt);
        } else {
            //Check if we are in phase 1
            if (!snoopPhase2) {
                snoopPhase2 = true;
            } else {
                //Check if we should do the snoop
                if (pkt->flags & SNOOP_COMMIT)
                    snoop(pkt);
                snoopPhase2 = false;
            }
        }
    }
    return true;
}

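// Atomic-mode access.  The access is serviced inline via probe() (snoops
// via snoopProbe()); the latency returned to the requester is currently
// just hitLatency, per the fixme below.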
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::
doAtomicAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide) {
        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
            pkt->req->setScResult(1);
        }

        probe(pkt, true);
        //TEMP ALWAYS SUCCESS FOR NOW
        pkt->result = Packet::Success;
    } else {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            snoopProbe(pkt, true);
    }
    //Fix this timing info
    return hitLatency;
}

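// Functional (debug) access.  probe() is called with update == false so
// no cache state or statistics change; the LL/SC shortcut used on the
// other paths is not available here.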
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
doFunctionalAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide) {
        //TEMP USE CPU?THREAD 0 0
        pkt->req->setThreadContext(0,0);

        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->getFlags() & LOCKED)) {
            panic("Can't handle LL/SC on functional path\n");
        }

        probe(pkt, false);
        //TEMP ALWAYS SUCCESSFUL FOR NOW
        pkt->result = Packet::Success;
    } else {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            snoopProbe(pkt, false);
    }
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{

}

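// Construct the cache and wire up the helper objects: the tag store, miss
// queue, coherence protocol, and prefetcher all get back-pointers to this
// cache (and to each other where needed) so they can cooperate on fills,
// writebacks, and bus requests.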
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
Cache(const std::string &_name,
      Cache<TagStore,Buffering,Coherence>::Params &params)
    : BaseCache(_name, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags), missQueue(params.missQueue),
      coherence(params.coherence), prefetcher(params.prefetcher),
      doCopy(params.doCopy), blockOnCopy(params.blockOnCopy)
{
    //FIX BUS POINTERS
    // if (params.in == NULL) {
    topLevelCache = true;
    // }
    //PLEASE FIX THIS, BUS SIZES NOT BEING USED
    tags->setCache(this, blkSize, 1/*params.out->width, params.out->clockRate*/);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);
    coherence->setCache(this);
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
#if 0
    invalidatePkt = new Packet;
    invalidatePkt->cmd = Packet::InvalidateReq;
#endif
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    missQueue->regStats(name());
    coherence->regStats(name());
    prefetcher->regStats(name());
}

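// Main cache access path (timing mode).  The flow is: notify the
// prefetcher if we prefetch on the access stream, satisfy bare
// upgrades/invalidates locally, look the block up in the tag store
// (possibly allocating on a whole-block write), issue any writebacks the
// lookup produced, then respond on a hit or hand the packet to the miss
// queue on a miss.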
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
{
//@todo Add back in MemDebug Calls
//    MemDebug::cacheAccess(pkt);
    BlkType *blk = NULL;
    PacketList writebacks;
    int size = blkSize;
    int lat = hitLatency;
    if (prefetchAccess) {
        //We are determining prefetches on access stream, call prefetcher
        prefetcher->handleMiss(pkt, curTick);
    }
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead() && !pkt->isWrite()) {
            //Upgrade or Invalidate
            //Look into what happens if two slave caches on bus
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));

            //@todo Should this return latency have the hit latency in it?
//            respond(pkt,curTick+lat);
            pkt->flags |= SATISFIED;
//            return MA_HIT; //@todo, return values
            return true;
        }
        blk = tags->handleAccess(pkt, lat, writebacks);
    } else {
        size = pkt->getSize();
    }
    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    /** @todo make the fast write alloc (wh64) work with coherence. */
    /** @todo Do we want to do fast writes for writebacks as well? */
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == Packet::WriteReq
         || pkt->cmd == Packet::WriteInvalidateReq)) {
        // no outstanding misses, can do this
        MSHR *outstanding_miss = missQueue->findMSHR(pkt->getAddr());
        if (pkt->cmd == Packet::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fast allocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
                                   writebacks);
            ++fastWrites;
        }
    }
    while (!writebacks.empty()) {
        missQueue->doWriteback(writebacks.front());
        writebacks.pop_front();
    }
    DPRINTF(Cache, "%s %x %s blk_addr: %x pc %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1), pkt->req->getPC());
    if (blk) {
        // Hit
        hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
        // clear dirty bit if write through
        if (pkt->needsResponse())
            respond(pkt, curTick+lat);
//        return MA_HIT;
        return true;
    }

    // Miss
    if (!pkt->req->isUncacheable()) {
        misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
        /** @todo Move miss count code into BaseCache */
        if (missCount) {
            --missCount;
            if (missCount == 0)
                new SimLoopExitEvent(curTick,
                                     "A cache reached the maximum miss count");
        }
    }
    missQueue->handleMiss(pkt, size, curTick + hitLatency);
//    return MA_CACHE_MISS;
    return true;
}

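// Pull the next packet the cache wants to send on the bus.  The bus
// command is (re)computed by the coherence protocol from the current
// state of the block, since that state may have changed while the miss
// sat in the queue.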
template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getPacket()
{
    Packet * pkt = missQueue->getPacket();
    if (pkt) {
        if (!pkt->req->isUncacheable()) {
            if (pkt->cmd == Packet::HardPFReq)
                misses[Packet::HardPFReq][pkt->req->getThreadNum()]++;
            BlkType *blk = tags->findBlock(pkt);
            Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
                                                       (blk) ? blk->status : 0);
            missQueue->setBusCmd(pkt, cmd);
        }
    }

    assert(!doMasterRequest() || missQueue->havePending());
    assert(!pkt || pkt->time <= curTick);
    return pkt;
}

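// Called back with the result of a bus transmission attempt: on success
// the packet is marked in service (with a temporary hack that completes
// upgrades immediately, since an upgrade gets no data response); on
// failure the original command is restored so the packet can be retried.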
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, bool success)
{
    if (success) {
        missQueue->markInService(pkt);
        //Temp Hack for UPGRADES
        if (pkt->cmd == Packet::UpgradeReq) {
            handleResponse(pkt);
        }
    } else if (pkt && !pkt->req->isUncacheable()) {
        missQueue->restoreOrigCmd(pkt);
    }
}

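// Handle a response from the memory side.  For cache fills the block is
// installed in the tag store with the new coherence state, any victim
// writebacks are pushed to the miss queue, and the MSHR targets are then
// satisfied.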
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
{
    BlkType *blk = NULL;
    if (pkt->senderState) {
//        MemDebug::cacheResponse(pkt);
        DPRINTF(Cache, "Handling response to %x, blk addr: %x\n",
                pkt->getAddr(), pkt->getAddr() & (((ULL(1))<<48)-1));

        if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
            blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            PacketList writebacks;
            blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
                                   coherence->getNewState(pkt,old_state),
                                   writebacks);
            while (!writebacks.empty()) {
                missQueue->doWriteback(writebacks.front());
                writebacks.pop_front();
            }
        }
        missQueue->handleResponse(pkt, curTick + hitLatency);
    }
}

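// pseudoFill temporarily pulls a block out of the cache and parks its
// contents in an MSHR target list: the data is read into the MSHR's
// packet, the block's status is stashed in the (otherwise unused) order
// field, and the block itself is invalidated until it is restored.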
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::pseudoFill(Addr addr)
{
    // Need to temporarily move this blk into MSHRs
    MSHR *mshr = missQueue->allocateTargetList(addr);
    int lat;
    PacketList dummy;
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // can overload order since it isn't used on non pending blocks
    mshr->order = blk->status;
    // temporarily remove the block from the cache.
    tags->invalidateBlk(addr);
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::pseudoFill(MSHR *mshr)
{
    // Need to temporarily move this blk into MSHRs
    assert(mshr->pkt->cmd == Packet::ReadReq);
    int lat;
    PacketList dummy;
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // can overload order since it isn't used on non pending blocks
    mshr->order = blk->status;
    // temporarily remove the block from the cache.
    tags->invalidateBlk(mshr->pkt->getAddr());
}

template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
{
    return coherence->getPacket();
}

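// Handle a snooped bus request.  For a top-level cache with a real
// protocol there are two special cases before the normal state machine
// runs: a snoop that conflicts with an in-service MSHR must be NACKed (or
// recorded as a pending invalidate), and a snoop that hits in the
// writeback buffer is satisfied directly from the buffered data.
// Otherwise the coherence protocol decides whether we supply data and
// what the block's new state should be.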
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
{
    DPRINTF(Cache, "SNOOPING\n");
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    if (isTopLevel() && coherence->hasProtocol()) { //@todo Move this into handle bus req
        //If we find an mshr, and it is in service, we need to NACK or invalidate
        if (mshr) {
            if (mshr->inService) {
                if ((mshr->pkt->isInvalidate() || !mshr->pkt->isCacheFill())
                    && (pkt->cmd != Packet::InvalidateReq
                        && pkt->cmd != Packet::WriteInvalidateReq)) {
                    //If the outstanding request was an invalidate (upgrade,readex,..)
                    //then we need to NACK the request until we get the data.
                    //Also NACK if the outstanding request is not a cachefill (writeback)
                    pkt->flags |= SATISFIED;
                    pkt->flags |= NACKED_LINE;
                    panic("Don't detect these on the other side yet\n");
                    respondToSnoop(pkt, curTick + hitLatency);
                    return;
                }
                else {
                    //The supplier will be someone else, because we are waiting for
                    //the data. This should cause this cache to be forced to go to
                    //the shared state, not the exclusive even though the shared line
                    //won't be asserted. But for now we will just invalidate ourselves
                    //and allow the other cache to go into the exclusive state.
                    //@todo Make it so a read to a pending read doesn't invalidate.
                    //@todo Make it so that a read to a pending read can't be exclusive now.

                    //Set the address so find match works
                    panic("Don't have invalidates yet\n");
                    invalidatePkt->addrOverride(pkt->getAddr());

                    //Append the invalidate on
                    missQueue->addTarget(mshr,invalidatePkt);
                    DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n",
                            pkt->getAddr() & (((ULL(1))<<48)-1));
                    return;
                }
            }
        }
        //We also need to check the writeback buffers and handle those
        std::vector<MSHR *> writebacks;
        if (missQueue->findWrites(blk_addr, writebacks)) {
            DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1));

            //Look through writebacks for any non-uncachable writes, use that
            for (int i = 0; i < writebacks.size(); i++) {
                mshr = writebacks[i];

                if (!mshr->pkt->req->isUncacheable()) {
                    if (pkt->isRead()) {
                        //Only Upgrades don't get here
                        //Supply the data
                        pkt->flags |= SATISFIED;

                        //If we are in an exclusive protocol, make it ask again
                        //to get write permissions (upgrade), signal shared
                        pkt->flags |= SHARED_LINE;

                        assert(pkt->isRead());
                        Addr offset = pkt->getAddr() & (Addr)(blkSize - 1);
                        assert(offset < blkSize);
                        assert(pkt->getSize() <= blkSize);
                        assert(offset + pkt->getSize() <= blkSize);
                        memcpy(pkt->getPtr<uint8_t>(),
                               mshr->pkt->getPtr<uint8_t>() + offset,
                               pkt->getSize());

                        respondToSnoop(pkt, curTick + hitLatency);
                    }

                    if (pkt->isInvalidate()) {
                        //This must be an upgrade or other cache will take ownership
                        missQueue->markInService(mshr->pkt);
                    }
                    return;
                }
            }
        }
    }
    CacheBlk::State new_state;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
    if (satisfy) {
        tags->handleSnoop(blk, new_state, pkt);
        respondToSnoop(pkt, curTick + hitLatency);
        return;
    }
    tags->handleSnoop(blk, new_state);
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
{
    //Need to handle the response, if NACKED
    if (pkt->flags & NACKED_LINE) {
        //Need to mark it as not in service, and retry for bus
        assert(0); //Yeah, we saw a NACK come through

        //For now this should never get called; we return false when we see
        //a NACK instead, which lets the bus_blocked mechanism handle the
        //retry.  For now it retries in just 2 cycles; need to figure out
        //how to change that.  Eventually we will also want success to come
        //in as a parameter, and to handle the functionality that happens on
        //a successful return of the sendAddr function.
    }
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
{
    tags->invalidateBlk(addr);
}

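// probe() services an access inline (atomic or functional mode).  On a
// tag hit it completes locally after draining any writebacks.  On a miss
// in functional mode (update == false) it forwards the request downstream
// and then patches the result against any newer data sitting in the MSHRs
// or the write buffer (reads pull the buffered data out; writes push the
// probe's data in).  In atomic mode (update == true) it fetches the block
// from below, fills the cache, and returns the accumulated latency.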
/**
 * @todo Fix to not assume write allocate
 */
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
{
//    MemDebug::cacheProbe(pkt);
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead() && !pkt->isWrite()) {
            //Upgrade or Invalidate, satisfy it, don't forward
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));
            pkt->flags |= SATISFIED;
            return 0;
        }
    }

    PacketList writebacks;
    int lat;
    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);

    if (!blk) {
        // Need to check for outstanding misses and writes
        Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);

        // There can only be one matching outstanding miss.
        MSHR* mshr = missQueue->findMSHR(blk_addr);

        // There can be many matching outstanding writes.
        std::vector<MSHR*> writes;
        missQueue->findWrites(blk_addr, writes);

        if (!update) {
            memSidePort->sendFunctional(pkt);
            // Check for data in MSHR and writebuffer.
            if (mshr) {
                warn("Found outstanding miss on a non-update probe");
                MSHR::TargetList *targets = mshr->getTargetList();
                MSHR::TargetList::iterator i = targets->begin();
                MSHR::TargetList::iterator end = targets->end();
                for (; i != end; ++i) {
                    Packet * target = *i;
                    // If the target contains data, and it overlaps the
                    // probed request, need to update data
                    if (target->isWrite() && target->intersect(pkt)) {
                        uint8_t* pkt_data;
                        uint8_t* write_data;
                        int data_size;
                        if (target->getAddr() < pkt->getAddr()) {
                            int offset = pkt->getAddr() - target->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>();
                            write_data = target->getPtr<uint8_t>() + offset;
                            data_size = target->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > pkt->getSize())
                                data_size = pkt->getSize();
                        } else {
                            int offset = target->getAddr() - pkt->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>() + offset;
                            write_data = target->getPtr<uint8_t>();
                            data_size = pkt->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > target->getSize())
                                data_size = target->getSize();
                        }

                        if (pkt->isRead()) {
                            memcpy(pkt_data, write_data, data_size);
                        } else {
                            memcpy(write_data, pkt_data, data_size);
                        }
                    }
                }
            }
            for (int i = 0; i < writes.size(); ++i) {
                Packet * write = writes[i]->pkt;
                if (write->intersect(pkt)) {
                    warn("Found outstanding write on a non-update probe");
                    uint8_t* pkt_data;
                    uint8_t* write_data;
                    int data_size;
                    if (write->getAddr() < pkt->getAddr()) {
                        int offset = pkt->getAddr() - write->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>();
                        write_data = write->getPtr<uint8_t>() + offset;
                        data_size = write->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > pkt->getSize())
                            data_size = pkt->getSize();
                    } else {
                        int offset = write->getAddr() - pkt->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>() + offset;
                        write_data = write->getPtr<uint8_t>();
                        data_size = pkt->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > write->getSize())
                            data_size = write->getSize();
                    }

                    if (pkt->isRead()) {
                        memcpy(pkt_data, write_data, data_size);
                    } else {
                        memcpy(write_data, pkt_data, data_size);
                    }

                }
            }
            return 0;
        } else {
            // update the cache state and statistics
            if (mshr || !writes.empty()) {
                // Can't handle it, return request unsatisfied.
                return 0;
            }
            if (!pkt->req->isUncacheable()) {
                // Fetch the cache block to fill
                BlkType *blk = tags->findBlock(pkt);
                Packet::Command temp_cmd =
                    coherence->getBusCmd(pkt->cmd, (blk) ? blk->status : 0);

                Packet * busPkt = new Packet(pkt->req, temp_cmd, -1, blkSize);

                busPkt->allocate();

                busPkt->time = curTick;

                lat = memSidePort->sendAtomic(busPkt);

/*              if (!(busPkt->flags & SATISFIED)) {
                    // blocked at a higher level, just return
                    return 0;
                }
*/
                misses[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;

                CacheBlk::State old_state = (blk) ? blk->status : 0;
                tags->handleFill(blk, busPkt,
                                 coherence->getNewState(busPkt, old_state),
                                 writebacks, pkt);
                // Handle writebacks if needed
                while (!writebacks.empty()) {
                    memSidePort->sendAtomic(writebacks.front());
                    writebacks.pop_front();
                }
                return lat + hitLatency;
            } else {
                return memSidePort->sendAtomic(pkt);
            }
        }
    } else {
        // There was a cache hit.
        // Handle writebacks if needed
        while (!writebacks.empty()) {
            memSidePort->sendAtomic(writebacks.front());
            writebacks.pop_front();
        }

        if (update) {
            hits[pkt->cmdToIndex()][pkt->req->getThreadNum()]++;
        } else if (pkt->isWrite()) {
            // Still need to change data in all locations.
            return memSidePort->sendAtomic(pkt);
        }
        return curTick + lat;
    }
    fatal("Probe not handled.\n");
    return 0;
}

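// Atomic/functional analogue of snoop(): let the coherence protocol
// decide on the new block state, update the tags, and return the latency
// of supplying data (hitLatency) if this cache satisfied the snoop.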
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt, bool update)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
    if (satisfy) {
        tags->handleSnoop(blk, new_state, pkt);
        return hitLatency;
    }
    tags->handleSnoop(blk, new_state);
    return 0;
}
688