 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
#include "sim/host.hh"
#include "base/misc.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh"
#include "sim/sim_exit.hh" // for SimExitEvent
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
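    // Temporary LL/SC handling, mirroring doAtomicAccess() below (assumed
    // from the identical code there): a locked (store-conditional) write is
    // simply marked successful rather than being tracked properly.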
    if (pkt->isWrite() && (pkt->req->isLocked())) {
        pkt->req->setScResult(1);
    if (pkt->isResponse())
    // Check if we should do the snoop
    if (pkt->flags & SNOOP_COMMIT)
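    // Assumption: SNOOP_COMMIT is set by the bus once a snoop has actually
    // committed (it is cleared again in sendResult() below), so snoops
    // without it are skipped here.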
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
doAtomicAccess(Packet *pkt, bool isCpuSide)
    // Temporary solution to LL/SC
    if (pkt->isWrite() && (pkt->req->isLocked())) {
        pkt->req->setScResult(1);
    probe(pkt, true, NULL);
    // TEMP: ALWAYS SUCCESS FOR NOW
    pkt->result = Packet::Success;
    if (pkt->isResponse())
        return snoopProbe(pkt);
    // Fix this timing info
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
doFunctionalAccess(Packet *pkt, bool isCpuSide)
    // TEMP: use context (CPU 0, thread 0)
    pkt->req->setThreadContext(0,0);
    // Temporary solution to LL/SC
    if (pkt->isWrite() && (pkt->req->isLocked())) {
        panic("Can't handle LL/SC on functional path\n");
    probe(pkt, false, memSidePort);
    // TEMP: ALWAYS SUCCESSFUL FOR NOW
    pkt->result = Packet::Success;
    probe(pkt, false, cpuSidePort);
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
Cache(const std::string &_name,
      Cache<TagStore,Buffering,Coherence>::Params &params)
    : BaseCache(_name, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags), missQueue(params.missQueue),
      coherence(params.coherence), prefetcher(params.prefetcher),
      doCopy(params.doCopy), blockOnCopy(params.blockOnCopy),
      hitLatency(params.hitLatency)
    tags->setCache(this);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);
    coherence->setCache(this);
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
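    // Preallocate a single request/packet pair for snoop-induced
    // invalidations; snoop() below retargets it per use via
    // invalidatePkt->addrOverride().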
    invalidateReq = new Request((Addr) NULL, blkSize, 0);
    invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0);
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::regStats()
    BaseCache::regStats();
    tags->regStats(name());
    missQueue->regStats(name());
    coherence->regStats(name());
    prefetcher->regStats(name());
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
    //@todo Add back in MemDebug calls
//    MemDebug::cacheAccess(pkt);
    PacketList writebacks;
    int lat = hitLatency;
    if (prefetchAccess) {
        // We are determining prefetches on the access stream, call prefetcher
        prefetcher->handleMiss(pkt, curTick);
    if (!pkt->req->isUncacheable()) {
        blk = tags->handleAccess(pkt, lat, writebacks);
    size = pkt->getSize();
    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    /** @todo make the fast write alloc (wh64) work with coherence. */
    /** @todo Do we want to do fast writes for writebacks as well? */
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == Packet::WriteReq || pkt->cmd == Packet::WriteInvalidateReq)) {
        // no outstanding misses, can do this
        MSHR* outstanding_miss = missQueue->findMSHR(pkt->getAddr());
        if (pkt->cmd == Packet::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fast allocate "
                     "with an outstanding miss to the same address\n");
            blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
    while (!writebacks.empty()) {
        missQueue->doWriteback(writebacks.front());
        writebacks.pop_front();
    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1));
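    // Note: blkSize must be a power of two for this masking to work;
    // ~((Addr)blkSize - 1) clears the offset bits (e.g. blkSize == 64 maps
    // 0x12345 to blk_addr 0x12340), while ((ULL(1))<<48)-1 just trims the
    // printed address to 48 bits.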
        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        // clear dirty bit if write through
        if (pkt->needsResponse())
            respond(pkt, curTick+lat);
        if (pkt->cmd == Packet::Writeback) {
            // Signal that you can kill the pkt/req
            pkt->flags |= SATISFIED;
    if (!pkt->req->isUncacheable()) {
        misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        /** @todo Move miss count code into BaseCache */
            exitSimLoop("A cache reached the maximum miss count");
    missQueue->handleMiss(pkt, size, curTick + hitLatency);
//    return MA_CACHE_MISS;
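    // From here the miss queue owns the packet: it allocates an MSHR and
    // later hands back a bus packet via getPacket() below (flow inferred
    // from the call sites in this file).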
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::getPacket()
    assert(missQueue->havePending());
    Packet * pkt = missQueue->getPacket();
    if (!pkt->req->isUncacheable()) {
        if (pkt->cmd == Packet::HardPFReq)
            misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++;
        BlkType *blk = tags->findBlock(pkt);
        Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
                                                   (blk) ? blk->status : 0);
        missQueue->setBusCmd(pkt, cmd);
    assert(!doMasterRequest() || missQueue->havePending());
    assert(!pkt || pkt->time <= curTick);
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
    if (success && !(pkt && (pkt->flags & NACKED_LINE))) {
        if (!mshr->pkt->needsResponse()
            && !(mshr->pkt->cmd == Packet::UpgradeReq)
            && (pkt && (pkt->flags & SATISFIED))) {
            // Writeback; clean up the non-copy version of the packet
        missQueue->markInService(mshr->pkt, mshr);
        // Temp hack for UPGRADES
        if (mshr->pkt && mshr->pkt->cmd == Packet::UpgradeReq) {
            assert(pkt); // Upgrades need to be fixed
            pkt->flags &= ~CACHE_LINE_FILL;
            BlkType *blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            CacheBlk::State new_state = coherence->getNewState(pkt, old_state);
            if (old_state != new_state)
                DPRINTF(Cache,
                        "Block for blk addr %x moving from state %i to %i\n",
                        pkt->getAddr() & (((ULL(1))<<48)-1),
                        old_state, new_state);
            // Set the state on the upgrade
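            // For an upgrade the block already holds valid data, so copy it
            // into the packet and push it through the normal fill path to
            // apply the new state (a reading of the code as written, not a
            // documented contract).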
            memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
            PacketList writebacks;
            tags->handleFill(blk, mshr, new_state, writebacks, pkt);
            assert(writebacks.empty());
            missQueue->handleResponse(pkt, curTick + hitLatency);
    } else if (pkt && !pkt->req->isUncacheable()) {
        pkt->flags &= ~NACKED_LINE;
        pkt->flags &= ~SATISFIED;
        pkt->flags &= ~SNOOP_COMMIT;
        // Remove copy from mshr
        missQueue->restoreOrigCmd(pkt);
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
    if (pkt->senderState) {
        // Delete temp copy in MSHR, restore it.
        delete ((MSHR*)pkt->senderState)->pkt;
        ((MSHR*)pkt->senderState)->pkt = pkt;
        if (pkt->result == Packet::Nacked) {
            //pkt->reinitFromRequest();
            warn("NACKs from devices not connected to the same bus not implemented\n");
        if (pkt->result == Packet::BadAddress) {
            // Make the response a bad address and send it
//    MemDebug::cacheResponse(pkt);
    DPRINTF(Cache, "Handling response to %x, blk addr: %x\n", pkt->getAddr(),
            pkt->getAddr() & (((ULL(1))<<48)-1));
    if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
        blk = tags->findBlock(pkt);
        CacheBlk::State old_state = (blk) ? blk->status : 0;
        PacketList writebacks;
        CacheBlk::State new_state = coherence->getNewState(pkt, old_state);
        if (old_state != new_state)
            DPRINTF(Cache,
                    "Block for blk addr %x moving from state %i to %i\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1), old_state, new_state);
        blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
                               new_state, writebacks, pkt);
        while (!writebacks.empty()) {
            missQueue->doWriteback(writebacks.front());
            writebacks.pop_front();
    missQueue->handleResponse(pkt, curTick + hitLatency);
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::pseudoFill(Addr addr)
    // Need to temporarily move this blk into MSHRs
    MSHR *mshr = missQueue->allocateTargetList(addr);
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // can overload order since it isn't used on non-pending blocks
    mshr->order = blk->status;
    // temporarily remove the block from the cache
    tags->invalidateBlk(addr);

template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::pseudoFill(MSHR *mshr)
    // Need to temporarily move this blk into MSHRs
    assert(mshr->pkt->cmd == Packet::ReadReq);
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // can overload order since it isn't used on non-pending blocks
    mshr->order = blk->status;
    // temporarily remove the block from the cache
    tags->invalidateBlk(mshr->pkt->getAddr());
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
    return coherence->getPacket();

template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::sendCoherenceResult(Packet* &pkt,
    coherence->sendResult(pkt, cshr, success);
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
    if (pkt->req->isUncacheable()) {
        // Can't get a hit on an uncacheable address
        // Revisit this for multi-level coherence
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    if (coherence->hasProtocol()) { //@todo Move this into handle bus req
        // If we find an mshr, and it is in service, we need to NACK or
        // invalidate
        if (mshr->inService) {
            if ((mshr->pkt->isInvalidate() || !mshr->pkt->isCacheFill())
                && (pkt->cmd != Packet::InvalidateReq
                    && pkt->cmd != Packet::WriteInvalidateReq)) {
                // If the outstanding request was an invalidate
                // (upgrade, readex, ...) then we need to NACK the request
                // until we get the data.
                // Also NACK if the outstanding request is not a cache fill
                // (writeback)
                assert(!(pkt->flags & SATISFIED));
                pkt->flags |= SATISFIED;
                pkt->flags |= NACKED_LINE;
                ///@todo NACKs from other levels
                //warn("NACKs from devices not connected to the same bus not implemented\n");
                //respondToSnoop(pkt, curTick + hitLatency);
            // The supplier will be someone else, because we are waiting for
            // the data. This should cause this cache to be forced to go to
            // the shared state, not the exclusive, even though the shared
            // line won't be asserted. But for now we will just invalidate
            // ourselves and allow the other cache to go into the exclusive
            // state.
            //@todo Make it so a read to a pending read doesn't invalidate.
            //@todo Make it so that a read to a pending read can't be exclusive now.
            // Set the address so find match works
            //panic("Don't have invalidates yet\n");
            invalidatePkt->addrOverride(pkt->getAddr());
            // Append the invalidate on
            missQueue->addTarget(mshr, invalidatePkt);
            DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1));
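            // Note: this reuses the single invalidatePkt preallocated in the
            // constructor; addrOverride() above retargets it, which appears
            // to limit the scheme to one pending snoop invalidation at a
            // time.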
    // We also need to check the writeback buffers and handle those
    std::vector<MSHR *> writebacks;
    if (missQueue->findWrites(blk_addr, writebacks)) {
        DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n",
                pkt->getAddr() & (((ULL(1))<<48)-1));
        // Look through writebacks for any non-uncacheable writes, use that
        for (int i = 0; i < writebacks.size(); i++) {
            mshr = writebacks[i];
            if (!mshr->pkt->req->isUncacheable()) {
                // Only Upgrades don't get here
                assert(!(pkt->flags & SATISFIED));
                pkt->flags |= SATISFIED;
                // If we are in an exclusive protocol, make it ask again
                // to get write permissions (upgrade), signal shared
                pkt->flags |= SHARED_LINE;
                assert(pkt->isRead());
                Addr offset = pkt->getAddr() & (blkSize - 1);
                assert(offset < blkSize);
                assert(pkt->getSize() <= blkSize);
                assert(offset + pkt->getSize() <= blkSize);
                memcpy(pkt->getPtr<uint8_t>(),
                       mshr->pkt->getPtr<uint8_t>() + offset, pkt->getSize());
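                // Worked example (illustrative): with blkSize == 64, a
                // snooped 8-byte read of address 0x1008 gives offset == 8,
                // so bytes 8..15 of the buffered writeback data satisfy the
                // snoop directly.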
                respondToSnoop(pkt, curTick + hitLatency);
        if (pkt->isInvalidate()) {
            // This must be an upgrade or the other cache will take ownership
            missQueue->markInService(mshr->pkt, mshr);
    CacheBlk::State new_state;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
        DPRINTF(Cache, "Cache snooped a %s request for addr %x and now "
                "supplying data, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);
        tags->handleSnoop(blk, new_state, pkt);
        respondToSnoop(pkt, curTick + hitLatency);
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
                "new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);
    tags->handleSnoop(blk, new_state);
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
    // Need to handle the response, if NACKED
    if (pkt->flags & NACKED_LINE) {
        // Need to mark it as not in service, and retry for bus
        assert(0); // Yeah, we saw a NACK come through
        // For now this should never get called; we return false when we see
        // a NACK instead. By doing this we allow the bus_blocked mechanism
        // to handle the retry. For now it retries in just 2 cycles; need to
        // figure out how to change that. Eventually we will also want
        // success to come in as a parameter, and we need to make sure that
        // we handle the functionality that happens on a successful return
        // of the sendAddr function.
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
    tags->invalidateBlk(addr);
 * @todo Fix to not assume write allocate
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update,
                                           CachePort* otherSidePort)
//    MemDebug::cacheProbe(pkt);
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead()
            && !pkt->isWrite()) {
            // Upgrade or Invalidate: satisfy it, don't forward
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));
            pkt->flags |= SATISFIED;
    PacketList writebacks;
    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);
    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1));
    // Need to check for outstanding misses and writes
    Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
    // There can only be one matching outstanding miss.
    MSHR* mshr = missQueue->findMSHR(blk_addr);
    // There can be many matching outstanding writes.
    std::vector<MSHR*> writes;
    missQueue->findWrites(blk_addr, writes);
        otherSidePort->sendFunctional(pkt);
        // Check for data in MSHR and writebuffer.
            warn("Found outstanding miss on a non-update probe");
            MSHR::TargetList *targets = mshr->getTargetList();
            MSHR::TargetList::iterator i = targets->begin();
            MSHR::TargetList::iterator end = targets->end();
            for (; i != end; ++i) {
                Packet * target = *i;
                // If the target contains data, and it overlaps the
                // probed request, need to update data
                if (target->isWrite() && target->intersect(pkt)) {
                    if (target->getAddr() < pkt->getAddr()) {
                        int offset = pkt->getAddr() - target->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>();
                        write_data = target->getPtr<uint8_t>() + offset;
                        data_size = target->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > pkt->getSize())
                            data_size = pkt->getSize();
                    } else {
                        int offset = target->getAddr() - pkt->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>() + offset;
                        write_data = target->getPtr<uint8_t>();
                        data_size = pkt->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > target->getSize())
                            data_size = target->getSize();
                    }
                    if (pkt->isWrite()) {
                        memcpy(pkt_data, write_data, data_size);
                    } else {
                        memcpy(write_data, pkt_data, data_size);
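                    // Worked example (illustrative): a 16-byte target write
                    // at 0x100 against a 16-byte probe of 0x108 takes the
                    // first branch: offset = 8, write_data points 8 bytes
                    // into the target's data, and data_size = 16 - 8 = 8
                    // bytes are merged.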
        for (int i = 0; i < writes.size(); ++i) {
            Packet * write = writes[i]->pkt;
            if (write->intersect(pkt)) {
                warn("Found outstanding write on a non-update probe");
                if (write->getAddr() < pkt->getAddr()) {
                    int offset = pkt->getAddr() - write->getAddr();
                    pkt_data = pkt->getPtr<uint8_t>();
                    write_data = write->getPtr<uint8_t>() + offset;
                    data_size = write->getSize() - offset;
                    assert(data_size > 0);
                    if (data_size > pkt->getSize())
                        data_size = pkt->getSize();
                } else {
                    int offset = write->getAddr() - pkt->getAddr();
                    pkt_data = pkt->getPtr<uint8_t>() + offset;
                    write_data = write->getPtr<uint8_t>();
                    data_size = pkt->getSize() - offset;
                    assert(data_size > 0);
                    if (data_size > write->getSize())
                        data_size = write->getSize();
                }
                if (pkt->isWrite()) {
                    memcpy(pkt_data, write_data, data_size);
                } else {
                    memcpy(write_data, pkt_data, data_size);
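                // Same merge logic as the MSHR target loop above, applied to
                // writes still sitting in the write buffer.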
        // update the cache state and statistics
        if (mshr || !writes.empty()) {
            // Can't handle it, return request unsatisfied.
            panic("Atomic access ran into outstanding MSHR's or WB's!");
        if (!pkt->req->isUncacheable()) {
            // Fetch the cache block to fill
            BlkType *blk = tags->findBlock(pkt);
            Packet::Command temp_cmd =
                coherence->getBusCmd(pkt->cmd, (blk) ? blk->status : 0);
            Packet * busPkt = new Packet(pkt->req, temp_cmd, -1, blkSize);
            busPkt->time = curTick;
            DPRINTF(Cache, "Sending an atomic %s for %x blk_addr: %x\n",
                    busPkt->cmdString(),
                    busPkt->getAddr() & (((ULL(1))<<48)-1),
                    busPkt->getAddr() & ~((Addr)blkSize - 1));
            lat = memSidePort->sendAtomic(busPkt);
            // Be sure to flip the response to a request for coherence
            if (busPkt->needsResponse()) {
                busPkt->makeAtomicResponse();
/*          if (!(busPkt->flags & SATISFIED)) {
                // blocked at a higher level, just return
            }
*/
            misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            CacheBlk::State new_state = coherence->getNewState(busPkt, old_state);
            DPRINTF(Cache, "Received response %s for blk addr %x in state %i\n",
                    busPkt->cmdString(),
                    busPkt->getAddr() & (((ULL(1))<<48)-1), old_state);
            if (old_state != new_state)
                DPRINTF(Cache,
                        "Block for blk addr %x moving from state %i to %i\n",
                        busPkt->getAddr() & (((ULL(1))<<48)-1),
                        old_state, new_state);
            tags->handleFill(blk, busPkt,
            // Handle writebacks if needed
            while (!writebacks.empty()) {
                Packet *wbPkt = writebacks.front();
                memSidePort->sendAtomic(wbPkt);
                writebacks.pop_front();
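            // The value returned below is the memory-side round trip
            // observed by sendAtomic() plus this cache's own hitLatency.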
            return lat + hitLatency;
        return memSidePort->sendAtomic(pkt);
        // There was a cache hit.
        // Handle writebacks if needed
        while (!writebacks.empty()) {
            memSidePort->sendAtomic(writebacks.front());
            writebacks.pop_front();
        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
    } else if (pkt->isWrite()) {
        // Still need to change data in all locations.
        otherSidePort->sendFunctional(pkt);
    fatal("Probe not handled.\n");
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt)
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
        DPRINTF(Cache, "Cache snooped a %s request for addr %x and now "
                "supplying data, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);
        tags->handleSnoop(blk, new_state, pkt);
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
                "new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);
    tags->handleSnoop(blk, new_state);