/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 */

/**
 * @file
 * Cache definitions.
 */

#include <cassert>
#include <cmath>
#include <iostream>
#include <string>

#include "sim/host.hh"
#include "base/misc.hh"
#include "cpu/smt.hh"

#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh"

#include "sim/sim_exit.hh" // for SimExitEvent

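/**
 * Dispatch a timing-mode access. CPU-side packets go through the
 * normal access path; memory-side packets are either responses or
 * snoop requests that have been committed on the bus.
 */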
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::
doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
{
    if (isCpuSide)
    {
        if (pkt->isWrite() && (pkt->req->isLocked())) {
            // Pre-set the store-conditional result to success; the
            // access path presumably overrides this if the SC fails.
            pkt->req->setScResult(1);
        }
        access(pkt);
    }
    else
    {
        if (pkt->isResponse())
            handleResponse(pkt);
        else {
            //Check if we should do the snoop
            if (pkt->flags & SNOOP_COMMIT)
                snoop(pkt);
        }
    }
    return true;
}

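/**
 * Dispatch an atomic-mode access and return its latency. CPU-side
 * packets are serviced in place via probe(); memory-side packets are
 * responses or snoop probes.
 */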
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::
doAtomicAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide)
    {
        probe(pkt, true, NULL);
        //TEMP: always succeeds for now
        pkt->result = Packet::Success;
    }
    else
    {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            return snoopProbe(pkt);
    }
    //@todo Fix this timing info
    return hitLatency;
}

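/**
 * Dispatch a functional (debug) access, probing this cache and
 * forwarding the request out the opposite port without updating any
 * cache state.
 */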
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
doFunctionalAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide)
    {
        //TEMP: always use CPU 0, thread 0 for now
        pkt->req->setThreadContext(0,0);

        probe(pkt, false, memSidePort);
        //TEMP: always successful for now
        pkt->result = Packet::Success;
    }
    else
    {
        probe(pkt, false, cpuSidePort);
    }
}

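/**
 * Port status changes are currently ignored.
 */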
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{
}


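/**
 * Construct the cache and cross-link the tag store, miss queue,
 * coherence protocol, and prefetcher. A reusable invalidate
 * request/packet pair is preallocated for invalidations triggered by
 * snoops (see snoop() below).
 */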
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
Cache(const std::string &_name,
      Cache<TagStore,Buffering,Coherence>::Params &params)
    : BaseCache(_name, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags), missQueue(params.missQueue),
      coherence(params.coherence), prefetcher(params.prefetcher),
      hitLatency(params.hitLatency)
{
    tags->setCache(this);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);
    coherence->setCache(this);
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
    invalidateReq = new Request((Addr) NULL, blkSize, 0);
    invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0);
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    missQueue->regStats(name());
    coherence->regStats(name());
    prefetcher->regStats(name());
}

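/**
 * Handle an access from the CPU side: look up the tags, issue any
 * resulting writebacks, respond on a hit, and hand misses to the miss
 * queue. Block-sized writes (e.g. WH64) may be allocated directly if
 * the coherence protocol allows fast writes.
 */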
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
{
//@todo Add back in MemDebug Calls
//    MemDebug::cacheAccess(pkt);
    BlkType *blk = NULL;
    PacketList writebacks;
    int size = blkSize;
    int lat = hitLatency;
    if (prefetchAccess) {
        //We are determining prefetches on access stream, call prefetcher
        prefetcher->handleMiss(pkt, curTick);
    }
    if (!pkt->req->isUncacheable()) {
        blk = tags->handleAccess(pkt, lat, writebacks);
    } else {
        size = pkt->getSize();
    }
    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    /** @todo make the fast write alloc (wh64) work with coherence. */
    /** @todo Do we want to do fast writes for writebacks as well? */
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == Packet::WriteReq
         || pkt->cmd == Packet::WriteInvalidateReq) ) {
        // no outstanding misses, can do this
        MSHR* outstanding_miss = missQueue->findMSHR(pkt->getAddr());
        if (pkt->cmd == Packet::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fast allocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
                                   writebacks);
            ++fastWrites;
        }
    }
    while (!writebacks.empty()) {
        missQueue->doWriteback(writebacks.front());
        writebacks.pop_front();
    }
    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1));
    if (blk) {
        // Hit
        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        // clear dirty bit if write through
        if (pkt->needsResponse())
            respond(pkt, curTick+lat);
        if (pkt->cmd == Packet::Writeback) {
            //Signal that you can kill the pkt/req
            pkt->flags |= SATISFIED;
        }
        return true;
    }

    // Miss
    if (!pkt->req->isUncacheable()) {
        misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        /** @todo Move miss count code into BaseCache */
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    missQueue->handleMiss(pkt, size, curTick + hitLatency);
//    return MA_CACHE_MISS;
    return true;
}


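/**
 * Return the next packet this cache needs to send on the bus, letting
 * the coherence protocol pick the bus command based on the current
 * block state.
 */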
template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getPacket()
{
    assert(missQueue->havePending());
    Packet * pkt = missQueue->getPacket();
    if (pkt) {
        if (!pkt->req->isUncacheable()) {
            if (pkt->cmd == Packet::HardPFReq)
                misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++;
            BlkType *blk = tags->findBlock(pkt);
            Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
                                                       (blk)? blk->status : 0);
            missQueue->setBusCmd(pkt, cmd);
        }
    }

    assert(!doMasterRequest() || missQueue->havePending());
    assert(!pkt || pkt->time <= curTick);
    return pkt;
}

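/**
 * Note the result of sending a packet on the bus. On success the MSHR
 * is marked in service (with a temporary special case for upgrades);
 * on failure or a NACK the original command is restored so the
 * request can be retried.
 */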
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
                                                bool success)
{
    if (success && !(pkt && (pkt->flags & NACKED_LINE))) {
        if (!mshr->pkt->needsResponse()
            && !(mshr->pkt->cmd == Packet::UpgradeReq)
            && (pkt && (pkt->flags & SATISFIED))) {
            //Writeback, clean up the non-copy version of the packet
            delete pkt;
        }
        missQueue->markInService(mshr->pkt, mshr);
        //Temp Hack for UPGRADES
        if (mshr->pkt && mshr->pkt->cmd == Packet::UpgradeReq) {
            assert(pkt);  //Upgrades need to be fixed
            pkt->flags &= ~CACHE_LINE_FILL;
            BlkType *blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
            if (old_state != new_state)
                DPRINTF(Cache, "Block for blk addr %x moving from "
                        "state %i to %i\n",
                        pkt->getAddr() & (((ULL(1))<<48)-1),
                        old_state, new_state);
            //Set the state on the upgrade
            memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
            PacketList writebacks;
            tags->handleFill(blk, mshr, new_state, writebacks, pkt);
            assert(writebacks.empty());
            missQueue->handleResponse(pkt, curTick + hitLatency);
        }
    } else if (pkt && !pkt->req->isUncacheable()) {
        pkt->flags &= ~NACKED_LINE;
        pkt->flags &= ~SATISFIED;
        pkt->flags &= ~SNOOP_COMMIT;

        //Remove copy from mshr
        delete mshr->pkt;
        mshr->pkt = pkt;

        missQueue->restoreOrigCmd(pkt);
    }
}

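/**
 * Handle a response from the memory side: the original packet is
 * restored from the MSHR, cache fills update the tags (performing any
 * writebacks of victim blocks), and the miss queue is notified.
 */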
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
{
    BlkType *blk = NULL;
    if (pkt->senderState) {
        //Delete temp copy in MSHR, restore it.
        delete ((MSHR*)pkt->senderState)->pkt;
        ((MSHR*)pkt->senderState)->pkt = pkt;
        if (pkt->result == Packet::Nacked) {
            //pkt->reinitFromRequest();
            warn("NACKs from devices not connected to the same bus "
                 "not implemented\n");
            return;
        }
        if (pkt->result == Packet::BadAddress) {
            //@todo Make the response a bad address and send it
        }
//        MemDebug::cacheResponse(pkt);
        DPRINTF(Cache, "Handling response to %x, blk addr: %x\n",
                pkt->getAddr(), pkt->getAddr() & (((ULL(1))<<48)-1));

        if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
            blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            PacketList writebacks;
            CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
            if (old_state != new_state)
                DPRINTF(Cache, "Block for blk addr %x moving from "
                        "state %i to %i\n",
                        pkt->getAddr() & (((ULL(1))<<48)-1),
                        old_state, new_state);
            blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
                                   new_state, writebacks, pkt);
            while (!writebacks.empty()) {
                missQueue->doWriteback(writebacks.front());
                writebacks.pop_front();
            }
        }
        missQueue->handleResponse(pkt, curTick + hitLatency);
    }
}

template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
{
    return coherence->getPacket();
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendCoherenceResult(Packet* &pkt,
                                                         MSHR *cshr,
                                                         bool success)
{
    coherence->sendResult(pkt, cshr, success);
}


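/**
 * Snoop a request seen on the memory-side bus. Snoops that hit an
 * in-service MSHR are NACKed or have an invalidation appended; hits
 * in the writeback buffer supply data directly. Otherwise the
 * coherence protocol decides whether this cache must supply data and
 * what the new block state should be.
 */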
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
{
    if (pkt->req->isUncacheable()) {
        //Can't get a hit on an uncacheable address
        //Revisit this for multi level coherence
        return;
    }
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    if (coherence->hasProtocol()) {  //@todo Move this into handle bus req
        //If we find an mshr, and it is in service, we need to NACK or
        //invalidate
        if (mshr) {
            if (mshr->inService) {
                if ((mshr->pkt->isInvalidate() || !mshr->pkt->isCacheFill())
                    && (pkt->cmd != Packet::InvalidateReq
                        && pkt->cmd != Packet::WriteInvalidateReq)) {
                    //If the outstanding request was an invalidate
                    //(upgrade, readex, ...) we need to NACK the request
                    //until we get the data. Also NACK if the outstanding
                    //request is not a cache fill (writeback).
                    assert(!(pkt->flags & SATISFIED));
                    pkt->flags |= SATISFIED;
                    pkt->flags |= NACKED_LINE;
                    ///@todo NACKs from other levels
                    //warn("NACKs from devices not connected to the same bus "
                    //"not implemented\n");
                    //respondToSnoop(pkt, curTick + hitLatency);
                    return;
                }
                else {
                    //The supplier will be someone else, because we are
                    //waiting for the data. This should cause this cache to
                    //be forced to go to the shared state, not the exclusive
                    //even though the shared line won't be asserted. But for
                    //now we will just invalidate ourselves and allow the
                    //other cache to go into the exclusive state.
                    //@todo Make it so a read to a pending read doesn't
                    //invalidate. @todo Make it so that a read to a pending
                    //read can't be exclusive now.

                    //Set the address so find match works
                    //panic("Don't have invalidates yet\n");
                    invalidatePkt->addrOverride(pkt->getAddr());

                    //Append the invalidate on
                    missQueue->addTarget(mshr,invalidatePkt);
                    DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n",
                            pkt->getAddr() & (((ULL(1))<<48)-1));
                    return;
                }
            }
        }
        //We also need to check the writeback buffers and handle those
        std::vector<MSHR *> writebacks;
        if (missQueue->findWrites(blk_addr, writebacks)) {
            DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1));

            //Look through writebacks for any non-uncacheable writes, use that
            for (int i=0; i<writebacks.size(); i++) {
                mshr = writebacks[i];

                if (!mshr->pkt->req->isUncacheable()) {
                    if (pkt->isRead()) {
                        //Only upgrades don't get here, so supply the data
                        assert(!(pkt->flags & SATISFIED));
                        pkt->flags |= SATISFIED;

                        //If we are in an exclusive protocol, make it ask again
                        //to get write permissions (upgrade), signal shared
                        pkt->flags |= SHARED_LINE;

                        assert(pkt->isRead());
                        Addr offset = pkt->getAddr() & (blkSize - 1);
                        assert(offset < blkSize);
                        assert(pkt->getSize() <= blkSize);
                        assert(offset + pkt->getSize() <= blkSize);
                        memcpy(pkt->getPtr<uint8_t>(),
                               mshr->pkt->getPtr<uint8_t>() + offset,
                               pkt->getSize());

                        respondToSnoop(pkt, curTick + hitLatency);
                    }

                    if (pkt->isInvalidate()) {
                        //This must be an upgrade or other cache will take
                        //ownership
                        missQueue->markInService(mshr->pkt, mshr);
                    }
                    return;
                }
            }
        }
    }
    CacheBlk::State new_state;
    bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
    if (satisfy) {
        DPRINTF(Cache, "Cache snooped a %s request for addr %x and "
                "now supplying data, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);

        tags->handleSnoop(blk, new_state, pkt);
        respondToSnoop(pkt, curTick + hitLatency);
        return;
    }
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
                "new state is %i\n", pkt->cmdString(), blk_addr, new_state);
    tags->handleSnoop(blk, new_state);
}

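/**
 * Handle the response to one of our own snoops. Retrying NACKed lines
 * is not implemented yet, hence the assert below.
 */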
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
{
    //Need to handle the response, if NACKED
    if (pkt->flags & NACKED_LINE) {
        //Need to mark it as not in service, and retry for bus
        assert(0);  //Yeah, we saw a NACK come through

        //For now this should never get called; we return false when we
        //see a NACK instead. This lets the bus_blocked mechanism handle
        //the retry. For now it retries in just 2 cycles; need to figure
        //out how to change that. Eventually we will also want success to
        //come in as a parameter, and we need to make sure we handle the
        //functionality that happens on a successful return of the
        //sendAddr function.
    }
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
{
    tags->invalidateBlk(addr);
}


/**
 * Probe the cache for the given packet. When update is true this acts
 * as an atomic access and updates cache state; when false it is a
 * functional access that also services the request from any matching
 * MSHR targets or buffered writebacks and forwards it through
 * otherSidePort.
 * @todo Fix to not assume write allocate
 */
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update,
                                           CachePort* otherSidePort)
{
//    MemDebug::cacheProbe(pkt);
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead()
            && !pkt->isWrite()) {
            //Upgrade or Invalidate, satisfy it, don't forward
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));
            pkt->flags |= SATISFIED;
            return 0;
        }
    }

    PacketList writebacks;
    int lat;
    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);

    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1));

    if (!blk) {
        // Need to check for outstanding misses and writes
        Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize - 1));

        // There can only be one matching outstanding miss.
        MSHR* mshr = missQueue->findMSHR(blk_addr);

        // There can be many matching outstanding writes.
        std::vector<MSHR*> writes;
        missQueue->findWrites(blk_addr, writes);

        if (!update) {
            otherSidePort->sendFunctional(pkt);

            // Check for data in MSHR and writebuffer.
            if (mshr) {
                warn("Found outstanding miss on a non-update probe");
                MSHR::TargetList *targets = mshr->getTargetList();
                MSHR::TargetList::iterator i = targets->begin();
                MSHR::TargetList::iterator end = targets->end();
                for (; i != end; ++i) {
                    Packet * target = *i;
                    // If the target contains data, and it overlaps the
                    // probed request, need to update data
                    if (target->isWrite() && target->intersect(pkt)) {
                        uint8_t* pkt_data;
                        uint8_t* write_data;
                        int data_size;
                        if (target->getAddr() < pkt->getAddr()) {
                            int offset = pkt->getAddr() - target->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>();
                            write_data = target->getPtr<uint8_t>() + offset;
                            data_size = target->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > pkt->getSize())
                                data_size = pkt->getSize();
                        } else {
                            int offset = target->getAddr() - pkt->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>() + offset;
                            write_data = target->getPtr<uint8_t>();
                            data_size = pkt->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > target->getSize())
                                data_size = target->getSize();
                        }

                        // A write probe updates the buffered data; a
                        // read probe is serviced from it.
                        if (pkt->isWrite()) {
                            memcpy(write_data, pkt_data, data_size);
                        } else {
                            memcpy(pkt_data, write_data, data_size);
                        }
                    }
                }
            }
            for (int i = 0; i < writes.size(); ++i) {
                Packet * write = writes[i]->pkt;
                if (write->intersect(pkt)) {
                    warn("Found outstanding write on a non-update probe");
                    uint8_t* pkt_data;
                    uint8_t* write_data;
                    int data_size;
                    if (write->getAddr() < pkt->getAddr()) {
                        int offset = pkt->getAddr() - write->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>();
                        write_data = write->getPtr<uint8_t>() + offset;
                        data_size = write->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > pkt->getSize())
                            data_size = pkt->getSize();
                    } else {
                        int offset = write->getAddr() - pkt->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>() + offset;
                        write_data = write->getPtr<uint8_t>();
                        data_size = pkt->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > write->getSize())
                            data_size = write->getSize();
                    }

                    // A write probe updates the buffered data; a read
                    // probe is serviced from it.
                    if (pkt->isWrite()) {
                        memcpy(write_data, pkt_data, data_size);
                    } else {
                        memcpy(pkt_data, write_data, data_size);
                    }
                }
            }
            return 0;
        } else {
            // update the cache state and statistics
            if (mshr || !writes.empty()){
                // Can't handle it, return request unsatisfied.
                panic("Atomic access ran into outstanding MSHR's or WB's!");
            }
            if (!pkt->req->isUncacheable()) {
                // Fetch the cache block to fill
                BlkType *blk = tags->findBlock(pkt);
                Packet::Command temp_cmd =
                    coherence->getBusCmd(pkt->cmd, (blk)? blk->status : 0);

                Packet * busPkt = new Packet(pkt->req,temp_cmd, -1, blkSize);

                busPkt->allocate();

                busPkt->time = curTick;

                DPRINTF(Cache, "Sending an atomic %s for %x blk_addr: %x\n",
                        busPkt->cmdString(),
                        busPkt->getAddr() & (((ULL(1))<<48)-1),
                        busPkt->getAddr() & ~((Addr)blkSize - 1));

                lat = memSidePort->sendAtomic(busPkt);

                //Be sure to flip the response to a request for coherence
                if (busPkt->needsResponse()) {
                    busPkt->makeAtomicResponse();
                }

/*              if (!(busPkt->flags & SATISFIED)) {
                    // blocked at a higher level, just return
                    return 0;
                }
*/
                misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;

                CacheBlk::State old_state = (blk) ? blk->status : 0;
                CacheBlk::State new_state =
                    coherence->getNewState(busPkt, old_state);
                DPRINTF(Cache,
                        "Receive response:%s for blk addr %x in state %i\n",
                        busPkt->cmdString(),
                        busPkt->getAddr() & (((ULL(1))<<48)-1), old_state);
                if (old_state != new_state)
                    DPRINTF(Cache, "Block for blk addr %x moving from "
                            "state %i to %i\n",
                            busPkt->getAddr() & (((ULL(1))<<48)-1),
                            old_state, new_state);

                tags->handleFill(blk, busPkt,
                                 new_state,
                                 writebacks, pkt);
                //Free the packet
                delete busPkt;

                // Handle writebacks if needed
                while (!writebacks.empty()){
                    Packet *wbPkt = writebacks.front();
                    memSidePort->sendAtomic(wbPkt);
                    writebacks.pop_front();
                    delete wbPkt;
                }
                return lat + hitLatency;
            } else {
                return memSidePort->sendAtomic(pkt);
            }
        }
    } else {
        // There was a cache hit.
        // Handle writebacks if needed
        while (!writebacks.empty()){
            memSidePort->sendAtomic(writebacks.front());
            writebacks.pop_front();
        }

        if (update) {
            hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        } else if (pkt->isWrite()) {
            // Still need to change data in all locations.
            otherSidePort->sendFunctional(pkt);
        }
        return hitLatency;
    }
    fatal("Probe not handled.\n");
    return 0;
}

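/**
 * Atomic-mode snoop: the coherence protocol decides whether this
 * cache supplies data and computes the new block state, then the tags
 * are updated accordingly.
 */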
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
    if (satisfy) {
        DPRINTF(Cache, "Cache snooped a %s request for addr %x and "
                "now supplying data, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);

        tags->handleSnoop(blk, new_state, pkt);
        return hitLatency;
    }
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
                "new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);
    tags->handleSnoop(blk, new_state);
    return 0;
}
