One step closer to having NACKs work.
[gem5.git] / src / mem / cache / cache_impl.hh
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 */

/**
 * @file
 * Cache definitions.
 */

#include <cassert>
#include <cmath>
#include <iostream>
#include <string>

#include "sim/host.hh"
#include "base/misc.hh"
#include "cpu/smt.hh"

#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh"

#include "sim/sim_exit.hh" // for SimExitEvent

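/**
 * Dispatch a timing-mode packet from either port. CPU-side requests fall
 * through to access() unless they were already satisfied; memory-side
 * packets are either responses (handleResponse()) or committed snoops
 * (snoop()).
 */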
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::
doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
{
    if (isCpuSide)
    {
        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->isLocked())) {
            pkt->req->setScResult(1);
        }
        if (!(pkt->flags & SATISFIED)) {
            access(pkt);
        }
    }
    else
    {
        if (pkt->isResponse())
            handleResponse(pkt);
        else {
            //Check if we should do the snoop
            if (pkt->flags & SNOOP_COMMIT)
                snoop(pkt);
        }
    }
    return true;
}

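/**
 * Dispatch an atomic-mode packet via probe()/snoopProbe() and return the
 * access latency. The LL/SC handling and the unconditional Success result
 * are both stopgaps (see the inline comments).
 */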
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::
doAtomicAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide)
    {
        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->isLocked())) {
            pkt->req->setScResult(1);
        }

        probe(pkt, true, NULL);
        //TEMP: ALWAYS SUCCESS FOR NOW
        pkt->result = Packet::Success;
    }
    else
    {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            snoopProbe(pkt);
    }
    //@todo Fix this timing info
    return hitLatency;
}

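/**
 * Dispatch a functional (debug) access. Functional requests probe the
 * cache without updating its state and are forwarded through the port on
 * the far side so every copy of the data is reached.
 */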
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
doFunctionalAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide)
    {
        //TEMP: always use CPU 0, thread 0 for now
        pkt->req->setThreadContext(0, 0);

        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->isLocked())) {
            panic("Can't handle LL/SC on functional path\n");
        }

        probe(pkt, false, memSidePort);
        //TEMP: ALWAYS SUCCESSFUL FOR NOW
        pkt->result = Packet::Success;
    }
    else
    {
        probe(pkt, false, cpuSidePort);
    }
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{
    // Port status changes are currently ignored.
}

template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
Cache(const std::string &_name,
      Cache<TagStore,Buffering,Coherence>::Params &params)
    : BaseCache(_name, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags), missQueue(params.missQueue),
      coherence(params.coherence), prefetcher(params.prefetcher),
      doCopy(params.doCopy), blockOnCopy(params.blockOnCopy)
{
    //FIX BUS POINTERS
//    if (params.in == NULL) {
    topLevelCache = true;
//    }
    //PLEASE FIX THIS, BUS SIZES NOT BEING USED
    tags->setCache(this, blkSize, 1/*params.out->width, params.out->clockRate*/);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);
    coherence->setCache(this);
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
#if 0
    invalidatePkt = new Packet;
    invalidatePkt->cmd = Packet::InvalidateReq;
#endif
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    missQueue->regStats(name());
    coherence->regStats(name());
    prefetcher->regStats(name());
}

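/**
 * Perform a timing-mode cache access. Handles upgrades/invalidates,
 * uncacheable requests, block-sized fast writes (WH64), hits (responding
 * after hitLatency), and misses (allocating an MSHR via the miss queue).
 */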
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
{
//@todo Add back in MemDebug Calls
//    MemDebug::cacheAccess(pkt);
    BlkType *blk = NULL;
    PacketList writebacks;
    int size = blkSize;
    int lat = hitLatency;
    if (prefetchAccess) {
        //We are determining prefetches on the access stream; call the prefetcher
        prefetcher->handleMiss(pkt, curTick);
    }
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead() && !pkt->isWrite()) {
            //Upgrade or Invalidate
            //Look into what happens if two slave caches are on the bus
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));

            //@todo Should this return latency have the hit latency in it?
//            respond(pkt, curTick+lat);
            pkt->flags |= SATISFIED;
//            return MA_HIT; //@todo, return values
            return true;
        }
        blk = tags->handleAccess(pkt, lat, writebacks);
    } else {
        size = pkt->getSize();
    }
    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    /** @todo make the fast write alloc (wh64) work with coherence. */
    /** @todo Do we want to do fast writes for writebacks as well? */
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == Packet::WriteReq || pkt->cmd == Packet::WriteInvalidateReq)) {
        // No outstanding misses; we can do this
        MSHR* outstanding_miss = missQueue->findMSHR(pkt->getAddr());
        if (pkt->cmd == Packet::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fast allocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
                                   writebacks);
            ++fastWrites;
        }
    }
    while (!writebacks.empty()) {
        missQueue->doWriteback(writebacks.front());
        writebacks.pop_front();
    }
    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1));
    if (blk) {
        // Hit
        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        // clear dirty bit if write through
        if (pkt->needsResponse())
            respond(pkt, curTick+lat);
//        return MA_HIT;
        return true;
    }

    // Miss
    if (!pkt->req->isUncacheable()) {
        misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        /** @todo Move miss count code into BaseCache */
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    missQueue->handleMiss(pkt, size, curTick + hitLatency);
//    return MA_CACHE_MISS;
    return true;
}

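/**
 * Fetch the next packet to send on the memory-side bus from the miss
 * queue, translating its command through the coherence protocol based on
 * the current state of the targeted block.
 */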
template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getPacket()
{
    Packet * pkt = missQueue->getPacket();
    if (pkt) {
        if (!pkt->req->isUncacheable()) {
            if (pkt->cmd == Packet::HardPFReq)
                misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++;
            BlkType *blk = tags->findBlock(pkt);
            Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
                                                       (blk) ? blk->status : 0);
            missQueue->setBusCmd(pkt, cmd);
        }
    }

    assert(!doMasterRequest() || missQueue->havePending());
    assert(!pkt || pkt->time <= curTick);
    return pkt;
}

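/**
 * Notification from the bus of the outcome of a send. On success the
 * request is marked in service (upgrades are completed immediately); on
 * failure the original command is restored so the request can be retried.
 */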
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
{
    if (success) {
        missQueue->markInService(pkt, mshr);
        //Temp Hack for UPGRADES
        if (pkt->cmd == Packet::UpgradeReq) {
            handleResponse(pkt);
        }
    } else if (pkt && !pkt->req->isUncacheable()) {
        missQueue->restoreOrigCmd(pkt);
    }
}

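/**
 * Handle a response from the memory side. NACKed packets are reinitialized
 * from the request but not yet supported (hence the panic); cache fills
 * update the tags and may generate writebacks before the miss queue is
 * notified.
 */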
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
{
    BlkType *blk = NULL;
    if (pkt->senderState) {
        if (pkt->result == Packet::Nacked) {
            pkt->reinitFromRequest();
            panic("Unimplemented NACK of packet\n");
        }
        if (pkt->result == Packet::BadAddress) {
            //Make the response a Bad address and send it
        }
//        MemDebug::cacheResponse(pkt);
        DPRINTF(Cache, "Handling response to %x, blk addr: %x\n", pkt->getAddr(),
                pkt->getAddr() & (((ULL(1))<<48)-1));

        if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
            blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            PacketList writebacks;
            CacheBlk::State new_state = coherence->getNewState(pkt, old_state);
            DPRINTF(Cache, "Block for blk addr %x moving from state %i to %i\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1), old_state, new_state);
            blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
                                   new_state, writebacks, pkt);
            while (!writebacks.empty()) {
                missQueue->doWriteback(writebacks.front());
                writebacks.pop_front();
            }
        }
        missQueue->handleResponse(pkt, curTick + hitLatency);
    }
}

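/**
 * Temporarily move the block at addr out of the cache and into an MSHR
 * target list, stashing its coherence state in the (otherwise unused)
 * MSHR order field.
 */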
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::pseudoFill(Addr addr)
{
    // Need to temporarily move this blk into MSHRs
    MSHR *mshr = missQueue->allocateTargetList(addr);
    int lat;
    PacketList dummy;
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // Can overload order since it isn't used on non-pending blocks
    mshr->order = blk->status;
    // Temporarily remove the block from the cache.
    tags->invalidateBlk(addr);
}

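/**
 * As above, but reads the block into an already-allocated MSHR.
 */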
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::pseudoFill(MSHR *mshr)
{
    // Need to temporarily move this blk into MSHRs
    assert(mshr->pkt->cmd == Packet::ReadReq);
    int lat;
    PacketList dummy;
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // Can overload order since it isn't used on non-pending blocks
    mshr->order = blk->status;
    // Temporarily remove the block from the cache.
    tags->invalidateBlk(mshr->pkt->getAddr());
}

template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
{
    return coherence->getPacket();
}

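/**
 * Handle a timing-mode snoop from the memory-side bus. On a top-level
 * cache this may NACK the snoop (pending invalidate or writeback in the
 * MSHRs), supply data out of the writeback buffers, or fall through to
 * the coherence protocol to decide whether to supply data and what the
 * new block state should be.
 */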
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    if (isTopLevel() && coherence->hasProtocol()) { //@todo Move this into handle bus req
        //If we find an mshr, and it is in service, we need to NACK or invalidate
        if (mshr) {
            if (mshr->inService) {
                if ((mshr->pkt->isInvalidate() || !mshr->pkt->isCacheFill())
                    && (pkt->cmd != Packet::InvalidateReq
                        && pkt->cmd != Packet::WriteInvalidateReq)) {
                    //If the outstanding request was an invalidate (upgrade, readex, ...)
                    //then we need to NACK the request until we get the data.
                    //Also NACK if the outstanding request is not a cache fill (writeback)
                    assert(!(pkt->flags & SATISFIED));
                    pkt->flags |= SATISFIED;
                    pkt->flags |= NACKED_LINE;
                    respondToSnoop(pkt, curTick + hitLatency);
                    return;
                }
                else {
                    //The supplier will be someone else, because we are waiting for
                    //the data. This should cause this cache to be forced to go to
                    //the shared state, not the exclusive, even though the shared line
                    //won't be asserted. But for now we will just invalidate ourselves
                    //and allow the other cache to go into the exclusive state.
                    //@todo Make it so a read to a pending read doesn't invalidate.
                    //@todo Make it so that a read to a pending read can't be exclusive now.

                    //Set the address so find match works
                    panic("Don't have invalidates yet\n");
                    invalidatePkt->addrOverride(pkt->getAddr());

                    //Append the invalidate on
                    missQueue->addTarget(mshr, invalidatePkt);
                    DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n",
                            pkt->getAddr() & (((ULL(1))<<48)-1));
                    return;
                }
            }
        }
        //We also need to check the writeback buffers and handle those
        std::vector<MSHR *> writebacks;
        if (missQueue->findWrites(blk_addr, writebacks)) {
            DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1));

            //Look through the writebacks for any non-uncacheable writes; use that
            for (int i = 0; i < writebacks.size(); i++) {
                mshr = writebacks[i];

                if (!mshr->pkt->req->isUncacheable()) {
                    if (pkt->isRead()) {
                        //Only Upgrades don't get here
                        //Supply the data
                        assert(!(pkt->flags & SATISFIED));
                        pkt->flags |= SATISFIED;

                        //If we are in an exclusive protocol, make it ask again
                        //to get write permissions (upgrade); signal shared
                        pkt->flags |= SHARED_LINE;

                        assert(pkt->isRead());
                        Addr offset = pkt->getAddr() & (Addr)(blkSize - 1);
                        assert(offset < blkSize);
                        assert(pkt->getSize() <= blkSize);
                        assert(offset + pkt->getSize() <= blkSize);
                        memcpy(pkt->getPtr<uint8_t>(),
                               mshr->pkt->getPtr<uint8_t>() + offset,
                               pkt->getSize());

                        respondToSnoop(pkt, curTick + hitLatency);
                    }

                    if (pkt->isInvalidate()) {
                        //This must be an upgrade or the other cache will take ownership
                        missQueue->markInService(mshr->pkt, mshr);
                    }
                    return;
                }
            }
        }
    }
    CacheBlk::State new_state;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
    if (satisfy) {
        DPRINTF(Cache, "Cache snooped a %s request and now supplying data, "
                "new state is %i\n",
                pkt->cmdString(), new_state);

        tags->handleSnoop(blk, new_state, pkt);
        respondToSnoop(pkt, curTick + hitLatency);
        return;
    }
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request, new state is %i\n",
                pkt->cmdString(), new_state);
    tags->handleSnoop(blk, new_state);
}

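/**
 * Handle the response to one of our own snoops. Only NACKed lines need
 * work here, and that path is not yet implemented; see the comments
 * below on how the retry is currently handled by the bus instead.
 */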
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
{
    //Need to handle the response, if NACKED
    if (pkt->flags & NACKED_LINE) {
        //Need to mark it as not in service, and retry for bus
        assert(0); //Yeah, we saw a NACK come through

        //For now this should never get called; we return false when we see a NACK
        //instead, and by doing this we allow the bus_blocked mechanism to handle
        //the retry. For now it retries in just 2 cycles; need to figure out how to
        //change that. Eventually we will also want to have success come in as a
        //parameter. Need to make sure that we handle the functionality that happens
        //on a successful return of the sendAddr function.
    }
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
{
    tags->invalidateBlk(addr);
}

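/**
 * Atomic/functional probe of the cache. With update == false this is a
 * pure functional access that merges data with any matching MSHR targets
 * and pending writes; with update == true it performs a full atomic
 * access, fetching the block from the memory side on a miss.
 */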
/**
 * @todo Fix to not assume write allocate
 */
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update, CachePort* otherSidePort)
{
//    MemDebug::cacheProbe(pkt);
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead() && !pkt->isWrite()) {
            //Upgrade or Invalidate, satisfy it, don't forward
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));
            pkt->flags |= SATISFIED;
            return 0;
        }
    }

    PacketList writebacks;
    int lat;
    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);

    if (!blk) {
        // Need to check for outstanding misses and writes
        Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);

        // There can only be one matching outstanding miss.
        MSHR* mshr = missQueue->findMSHR(blk_addr);

        // There can be many matching outstanding writes.
        std::vector<MSHR*> writes;
        missQueue->findWrites(blk_addr, writes);

        if (!update) {
            otherSidePort->sendFunctional(pkt);

            // Check for data in MSHR and writebuffer.
            if (mshr) {
                warn("Found outstanding miss on a non-update probe");
                MSHR::TargetList *targets = mshr->getTargetList();
                MSHR::TargetList::iterator i = targets->begin();
                MSHR::TargetList::iterator end = targets->end();
                for (; i != end; ++i) {
                    Packet * target = *i;
                    // If the target contains data, and it overlaps the
                    // probed request, need to update data
                    if (target->isWrite() && target->intersect(pkt)) {
                        uint8_t* pkt_data;
                        uint8_t* write_data;
                        int data_size;
                        if (target->getAddr() < pkt->getAddr()) {
                            int offset = pkt->getAddr() - target->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>();
                            write_data = target->getPtr<uint8_t>() + offset;
                            data_size = target->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > pkt->getSize())
                                data_size = pkt->getSize();
                        } else {
                            int offset = target->getAddr() - pkt->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>() + offset;
                            write_data = target->getPtr<uint8_t>();
                            data_size = pkt->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > target->getSize())
                                data_size = target->getSize();
                        }

                        if (pkt->isWrite()) {
                            memcpy(pkt_data, write_data, data_size);
                        } else {
                            memcpy(write_data, pkt_data, data_size);
                        }
                    }
                }
            }
            for (int i = 0; i < writes.size(); ++i) {
                Packet * write = writes[i]->pkt;
                if (write->intersect(pkt)) {
                    warn("Found outstanding write on a non-update probe");
                    uint8_t* pkt_data;
                    uint8_t* write_data;
                    int data_size;
                    if (write->getAddr() < pkt->getAddr()) {
                        int offset = pkt->getAddr() - write->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>();
                        write_data = write->getPtr<uint8_t>() + offset;
                        data_size = write->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > pkt->getSize())
                            data_size = pkt->getSize();
                    } else {
                        int offset = write->getAddr() - pkt->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>() + offset;
                        write_data = write->getPtr<uint8_t>();
                        data_size = pkt->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > write->getSize())
                            data_size = write->getSize();
                    }

                    if (pkt->isWrite()) {
                        memcpy(pkt_data, write_data, data_size);
                    } else {
                        memcpy(write_data, pkt_data, data_size);
                    }

                }
            }
            return 0;
        } else {
            // Update the cache state and statistics
            if (mshr || !writes.empty()) {
                // Can't handle it; return the request unsatisfied.
                panic("Atomic access ran into outstanding MSHR's or WB's!");
            }
            if (!pkt->req->isUncacheable()) {
                // Fetch the cache block to fill
                BlkType *blk = tags->findBlock(pkt);
                Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd,
                                                                (blk) ? blk->status : 0);

                Packet * busPkt = new Packet(pkt->req, temp_cmd, -1, blkSize);

                busPkt->allocate();

                busPkt->time = curTick;

                lat = memSidePort->sendAtomic(busPkt);

                //Be sure to flip the response to a request for coherence
                if (busPkt->needsResponse()) {
                    busPkt->makeAtomicResponse();
                }

/*                if (!(busPkt->flags & SATISFIED)) {
                    // blocked at a higher level, just return
                    return 0;
                }
*/
                misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;

                CacheBlk::State old_state = (blk) ? blk->status : 0;
                tags->handleFill(blk, busPkt,
                                 coherence->getNewState(busPkt, old_state),
                                 writebacks, pkt);
                // Handle writebacks if needed
                while (!writebacks.empty()) {
                    memSidePort->sendAtomic(writebacks.front());
                    writebacks.pop_front();
                }
                return lat + hitLatency;
            } else {
                return memSidePort->sendAtomic(pkt);
            }
        }
    } else {
        // There was a cache hit.
        // Handle writebacks if needed
        while (!writebacks.empty()) {
            memSidePort->sendAtomic(writebacks.front());
            writebacks.pop_front();
        }

        if (update) {
            hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        } else if (pkt->isWrite()) {
            // Still need to change data in all locations.
            otherSidePort->sendFunctional(pkt);
        }
        return curTick + lat;
    }
    fatal("Probe not handled.\n");
    return 0;
}

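/**
 * Atomic-mode snoop: let the coherence protocol decide whether we supply
 * data, update the tags to the new state, and return the latency (0 if
 * no data was supplied).
 */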
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
    if (satisfy) {
        DPRINTF(Cache, "Cache snooped a %s request and now supplying data, "
                "new state is %i\n",
                pkt->cmdString(), new_state);

        tags->handleSnoop(blk, new_state, pkt);
        return hitLatency;
    }
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request, new state is %i\n",
                pkt->cmdString(), new_state);
    tags->handleSnoop(blk, new_state);
    return 0;
}