/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 */

/**
 * @file
 * Cache definitions.
 */

#include <cassert>
#include <cmath>
#include <iostream>
#include <string>

#include "sim/host.hh"
#include "base/misc.hh"
#include "cpu/smt.hh"

#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh"

#include "sim/sim_exit.hh" // for exitSimLoop

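/**
 * Dispatch a timing-mode access. CPU-side requests are handled by
 * access(); memory-side packets are either responses (handleResponse())
 * or snoops, which are only serviced once they carry SNOOP_COMMIT.
 */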
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::
doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
{
    if (isCpuSide) {
        if (pkt->isWrite() && (pkt->req->isLocked())) {
            pkt->req->setScResult(1);
        }
        access(pkt);
    } else {
        if (pkt->isResponse())
            handleResponse(pkt);
        else {
            //Check if we should do the snoop
            if (pkt->flags & SNOOP_COMMIT)
                snoop(pkt);
        }
    }
    return true;
}

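/**
 * Dispatch an atomic-mode access and return its latency. CPU-side
 * requests go through probe(); memory-side packets are either responses
 * or snoop probes.
 */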
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::
doAtomicAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide) {
        probe(pkt, true, NULL);
        //TEMP: always report success for now
        pkt->result = Packet::Success;
    } else {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            return snoopProbe(pkt);
    }
    //@todo Fix this timing info
    return hitLatency;
}

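/**
 * Dispatch a functional access. CPU-side requests probe this cache and
 * are forwarded out the memory-side port; memory-side requests are
 * forwarded toward the CPU-side port.
 */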
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
doFunctionalAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide) {
        //TEMP: use CPU 0, thread 0 for now
        pkt->req->setThreadContext(0,0);

        probe(pkt, false, memSidePort);
        //TEMP: always report success for now
        pkt->result = Packet::Success;
    } else {
        probe(pkt, false, cpuSidePort);
    }
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{
}


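/**
 * Construct the cache and wire up its helper objects: the tag store,
 * miss queue, coherence protocol, and prefetcher all get back-pointers
 * to this cache. Also preallocates the request/packet pair that snoop()
 * reuses to append invalidates to outstanding MSHRs.
 */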
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
Cache(const std::string &_name,
      Cache<TagStore,Buffering,Coherence>::Params &params)
    : BaseCache(_name, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags), missQueue(params.missQueue),
      coherence(params.coherence), prefetcher(params.prefetcher),
      hitLatency(params.hitLatency)
{
    tags->setCache(this);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);
    coherence->setCache(this);
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
    invalidateReq = new Request((Addr) NULL, blkSize, 0);
    invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0);
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    missQueue->regStats(name());
    coherence->regStats(name());
    prefetcher->regStats(name());
}

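/**
 * Perform a timing access to this cache. Drives the prefetcher on the
 * access stream if configured, looks the block up in the tag store,
 * performs fast-write allocation for block-sized writes when the
 * coherence protocol allows it, issues any writebacks produced, and on
 * a miss hands the packet to the miss queue.
 */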
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
{
    //@todo Add back in MemDebug calls
    // MemDebug::cacheAccess(pkt);
    BlkType *blk = NULL;
    PacketList writebacks;
    int size = blkSize;
    int lat = hitLatency;
    if (prefetchAccess) {
        //We are determining prefetches on the access stream, call prefetcher
        prefetcher->handleMiss(pkt, curTick);
    }
    if (!pkt->req->isUncacheable()) {
        blk = tags->handleAccess(pkt, lat, writebacks);
    } else {
        size = pkt->getSize();
    }
    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    /** @todo make the fast write alloc (wh64) work with coherence. */
    /** @todo Do we want to do fast writes for writebacks as well? */
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == Packet::WriteReq
         || pkt->cmd == Packet::WriteInvalidateReq) ) {
        // no outstanding misses, can do this
        MSHR* outstanding_miss = missQueue->findMSHR(pkt->getAddr());
        if (pkt->cmd == Packet::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fast allocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
                                   writebacks);
            ++fastWrites;
        }
    }
    while (!writebacks.empty()) {
        missQueue->doWriteback(writebacks.front());
        writebacks.pop_front();
    }
    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1));
    if (blk) {
        // Hit
        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        // clear dirty bit if write through
        if (pkt->needsResponse())
            respond(pkt, curTick+lat);
        if (pkt->cmd == Packet::Writeback) {
            //Signal that you can kill the pkt/req
            pkt->flags |= SATISFIED;
        }
        return true;
    }

    // Miss
    if (!pkt->req->isUncacheable()) {
        misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        /** @todo Move miss count code into BaseCache */
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    missQueue->handleMiss(pkt, size, curTick + hitLatency);
    // return MA_CACHE_MISS;
    return true;
}


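/**
 * Get the next miss-queue packet to send on the bus, translating its
 * command through the coherence protocol based on the current block
 * state.
 */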
template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getPacket()
{
    assert(missQueue->havePending());
    Packet * pkt = missQueue->getPacket();
    if (pkt) {
        if (!pkt->req->isUncacheable()) {
            if (pkt->cmd == Packet::HardPFReq)
                misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++;
            BlkType *blk = tags->findBlock(pkt);
            Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
                                                       (blk) ? blk->status : 0);
            missQueue->setBusCmd(pkt, cmd);
        }
    }

    assert(!doMasterRequest() || missQueue->havePending());
    assert(!pkt || pkt->time <= curTick);
    return pkt;
}

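/**
 * Notify this cache of the result of sending a packet on the bus. On
 * success the corresponding MSHR is marked in service (with a temporary
 * special case for upgrades); on failure the original command is
 * restored so the request can be retried.
 */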
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr,
                                                bool success)
{
    if (success && !(pkt && (pkt->flags & NACKED_LINE))) {
        if (!mshr->pkt->needsResponse()
            && !(mshr->pkt->cmd == Packet::UpgradeReq)
            && (pkt && (pkt->flags & SATISFIED))) {
            //Writeback, clean up the non-copy version of the packet
            delete pkt;
        }
        missQueue->markInService(mshr->pkt, mshr);
        //Temp hack for UPGRADES
        if (mshr->pkt && mshr->pkt->cmd == Packet::UpgradeReq) {
            assert(pkt); //Upgrades need to be fixed
            pkt->flags &= ~CACHE_LINE_FILL;
            BlkType *blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            CacheBlk::State new_state = coherence->getNewState(pkt, old_state);
            if (old_state != new_state)
                DPRINTF(Cache, "Block for blk addr %x moving from "
                        "state %i to %i\n",
                        pkt->getAddr() & (((ULL(1))<<48)-1),
                        old_state, new_state);
            //Set the state on the upgrade
            memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
            PacketList writebacks;
            tags->handleFill(blk, mshr, new_state, writebacks, pkt);
            assert(writebacks.empty());
            missQueue->handleResponse(pkt, curTick + hitLatency);
        }
    } else if (pkt && !pkt->req->isUncacheable()) {
        pkt->flags &= ~NACKED_LINE;
        pkt->flags &= ~SATISFIED;
        pkt->flags &= ~SNOOP_COMMIT;

        //Remove copy from MSHR
        delete mshr->pkt;
        mshr->pkt = pkt;

        missQueue->restoreOrigCmd(pkt);
    }
}

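/**
 * Handle a response from the bus. Restores the MSHR's original packet,
 * and if the response is a cache fill, updates the block state via the
 * coherence protocol, fills the block, and issues any resulting
 * writebacks before passing the response to the miss queue.
 */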
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
{
    BlkType *blk = NULL;
    if (pkt->senderState) {
        //Delete temp copy in MSHR, restore it.
        delete ((MSHR*)pkt->senderState)->pkt;
        ((MSHR*)pkt->senderState)->pkt = pkt;
        if (pkt->result == Packet::Nacked) {
            //pkt->reinitFromRequest();
            warn("NACKs from devices not connected to the same bus "
                 "not implemented\n");
            return;
        }
        if (pkt->result == Packet::BadAddress) {
            //@todo Make the response a bad address and send it
        }
        // MemDebug::cacheResponse(pkt);
        DPRINTF(Cache, "Handling response to %x, blk addr: %x\n",
                pkt->getAddr(),
                pkt->getAddr() & (((ULL(1))<<48)-1));

        if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
            blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            PacketList writebacks;
            CacheBlk::State new_state = coherence->getNewState(pkt, old_state);
            if (old_state != new_state)
                DPRINTF(Cache, "Block for blk addr %x moving from "
                        "state %i to %i\n",
                        pkt->getAddr() & (((ULL(1))<<48)-1),
                        old_state, new_state);
            blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
                                   new_state, writebacks, pkt);
            while (!writebacks.empty()) {
                missQueue->doWriteback(writebacks.front());
                writebacks.pop_front();
            }
        }
        missQueue->handleResponse(pkt, curTick + hitLatency);
    }
}

template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
{
    return coherence->getPacket();
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendCoherenceResult(Packet* &pkt,
                                                         MSHR *cshr,
                                                         bool success)
{
    coherence->sendResult(pkt, cshr, success);
}


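/**
 * Snoop a request from the bus. Checks outstanding MSHRs (NACKing or
 * appending an invalidate as needed), supplies data from the writeback
 * buffers on a hit there, and otherwise lets the coherence protocol
 * decide whether this cache must supply data and what the new block
 * state is.
 */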
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
{
    if (pkt->req->isUncacheable()) {
        //Can't get a hit on an uncacheable address
        //Revisit this for multi-level coherence
        return;
    }
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    if (coherence->hasProtocol()) { //@todo Move this into handle bus req
        //If we find an MSHR, and it is in service, we need to NACK or
        //invalidate
        if (mshr) {
            if (mshr->inService) {
                if ((mshr->pkt->isInvalidate() || !mshr->pkt->isCacheFill())
                    && (pkt->cmd != Packet::InvalidateReq
                        && pkt->cmd != Packet::WriteInvalidateReq)) {
                    //If the outstanding request was an invalidate
                    //(upgrade, readex, ...), then we need to NACK the
                    //request until we get the data. Also NACK if the
                    //outstanding request is not a cache fill (writeback).
                    assert(!(pkt->flags & SATISFIED));
                    pkt->flags |= SATISFIED;
                    pkt->flags |= NACKED_LINE;
                    ///@todo NACKs from other levels
                    //warn("NACKs from devices not connected to the same bus "
                    //     "not implemented\n");
                    //respondToSnoop(pkt, curTick + hitLatency);
                    return;
                }
                else {
                    //The supplier will be someone else, because we are
                    //waiting for the data. This should cause this cache to
                    //be forced to go to the shared state, not the exclusive
                    //even though the shared line won't be asserted. But for
                    //now we will just invalidate ourselves and allow the
                    //other cache to go into the exclusive state. @todo Make
                    //it so a read to a pending read doesn't invalidate.
                    //@todo Make it so that a read to a pending read can't
                    //be exclusive now.

                    //Set the address so find match works
                    //panic("Don't have invalidates yet\n");
                    invalidatePkt->addrOverride(pkt->getAddr());

                    //Append the invalidate on
                    missQueue->addTarget(mshr, invalidatePkt);
                    DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n",
                            pkt->getAddr() & (((ULL(1))<<48)-1));
                    return;
                }
            }
        }
        //We also need to check the writeback buffers and handle those
        std::vector<MSHR *> writebacks;
        if (missQueue->findWrites(blk_addr, writebacks)) {
            DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1));

            //Look through writebacks for any non-uncacheable writes, use that
            for (int i=0; i<writebacks.size(); i++) {
                mshr = writebacks[i];

                if (!mshr->pkt->req->isUncacheable()) {
                    if (pkt->isRead()) {
                        //Only upgrades don't get here
                        //Supply the data
                        assert(!(pkt->flags & SATISFIED));
                        pkt->flags |= SATISFIED;

                        //If we are in an exclusive protocol, make it ask again
                        //to get write permissions (upgrade), signal shared
                        pkt->flags |= SHARED_LINE;

                        assert(pkt->isRead());
                        Addr offset = pkt->getAddr() & (blkSize - 1);
                        assert(offset < blkSize);
                        assert(pkt->getSize() <= blkSize);
                        assert(offset + pkt->getSize() <= blkSize);
                        memcpy(pkt->getPtr<uint8_t>(),
                               mshr->pkt->getPtr<uint8_t>() + offset,
                               pkt->getSize());

                        respondToSnoop(pkt, curTick + hitLatency);
                    }

                    if (pkt->isInvalidate()) {
                        //This must be an upgrade or the other cache will
                        //take ownership
                        missQueue->markInService(mshr->pkt, mshr);
                    }
                    return;
                }
            }
        }
    }
    CacheBlk::State new_state;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
    if (satisfy) {
        DPRINTF(Cache, "Cache snooped a %s request for addr %x and "
                "now supplying data, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);

        tags->handleSnoop(blk, new_state, pkt);
        respondToSnoop(pkt, curTick + hitLatency);
        return;
    }
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
                "new state is %i\n", pkt->cmdString(), blk_addr, new_state);
    tags->handleSnoop(blk, new_state);
}

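/**
 * Handle the response to one of our snoops. Only relevant when the
 * snoop was NACKed, which is currently unimplemented (see below).
 */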
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
{
    //Need to handle the response, if NACKED
    if (pkt->flags & NACKED_LINE) {
        //Need to mark it as not in service, and retry for bus
        assert(0); //Yeah, we saw a NACK come through

        //For now this should never get called; we return false when we
        //see a NACK instead. By doing this we allow the bus_blocked
        //mechanism to handle the retry. For now it retries in just 2
        //cycles; need to figure out how to change that. Eventually we
        //will want to also have success come in as a parameter. Need to
        //make sure that we handle the functionality that happens on a
        //successful return of the sendAddr function.
    }
}

template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
{
    tags->invalidateBlk(addr);
}


/**
 * Probe the cache for the given packet. With update == false this is a
 * functional access that must also check outstanding MSHRs and write
 * buffers for newer data; with update == true it is an atomic access
 * that may fetch the block from memory and fill the cache.
 *
 * @todo Fix to not assume write allocate
 */
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update,
                                           CachePort* otherSidePort)
{
    // MemDebug::cacheProbe(pkt);
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead() && !pkt->isWrite()) {
            //Upgrade or Invalidate, satisfy it, don't forward
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));
            pkt->flags |= SATISFIED;
            return 0;
        }
    }

    if (!update && (pkt->isWrite() || (otherSidePort == cpuSidePort))) {
        // Still need to change data in all locations.
        otherSidePort->sendFunctional(pkt);
        if (pkt->isRead() && pkt->result == Packet::Success)
            return 0;
    }

    PacketList writebacks;
    int lat;
    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);

    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1));

    // Need to check for outstanding misses and writes
    Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);

    // There can only be one matching outstanding miss.
    MSHR* mshr = missQueue->findMSHR(blk_addr);

    // There can be many matching outstanding writes.
    std::vector<MSHR*> writes;
    missQueue->findWrites(blk_addr, writes);

    if (!update) {
        // Check for data in MSHR and writebuffer.
        if (mshr) {
            warn("Found outstanding miss on a non-update probe");
            MSHR::TargetList *targets = mshr->getTargetList();
            MSHR::TargetList::iterator i = targets->begin();
            MSHR::TargetList::iterator end = targets->end();
            for (; i != end; ++i) {
                Packet * target = *i;
                // If the target contains data, and it overlaps the
                // probed request, need to update data
                if (target->isWrite() && target->intersect(pkt)) {
                    uint8_t* pkt_data;
                    uint8_t* write_data;
                    int data_size;
                    if (target->getAddr() < pkt->getAddr()) {
                        int offset = pkt->getAddr() - target->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>();
                        write_data = target->getPtr<uint8_t>() + offset;
                        data_size = target->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > pkt->getSize())
                            data_size = pkt->getSize();
                    } else {
                        int offset = target->getAddr() - pkt->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>() + offset;
                        write_data = target->getPtr<uint8_t>();
                        data_size = pkt->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > target->getSize())
                            data_size = target->getSize();
                    }

                    if (pkt->isWrite()) {
                        memcpy(pkt_data, write_data, data_size);
                    } else {
                        memcpy(write_data, pkt_data, data_size);
                    }
                }
            }
        }
        for (int i = 0; i < writes.size(); ++i) {
            Packet * write = writes[i]->pkt;
            if (write->intersect(pkt)) {
                warn("Found outstanding write on a non-update probe");
                uint8_t* pkt_data;
                uint8_t* write_data;
                int data_size;
                if (write->getAddr() < pkt->getAddr()) {
                    int offset = pkt->getAddr() - write->getAddr();
                    pkt_data = pkt->getPtr<uint8_t>();
                    write_data = write->getPtr<uint8_t>() + offset;
                    data_size = write->getSize() - offset;
                    assert(data_size > 0);
                    if (data_size > pkt->getSize())
                        data_size = pkt->getSize();
                } else {
                    int offset = write->getAddr() - pkt->getAddr();
                    pkt_data = pkt->getPtr<uint8_t>() + offset;
                    write_data = write->getPtr<uint8_t>();
                    data_size = pkt->getSize() - offset;
                    assert(data_size > 0);
                    if (data_size > write->getSize())
                        data_size = write->getSize();
                }

                if (pkt->isWrite()) {
                    memcpy(pkt_data, write_data, data_size);
                } else {
                    memcpy(write_data, pkt_data, data_size);
                }
            }
        }
    } else if (!blk) {
        // update the cache state and statistics
        if (mshr || !writes.empty()) {
            // Can't handle it, return request unsatisfied.
            panic("Atomic access ran into outstanding MSHR's or WB's!");
        }
        if (!pkt->req->isUncacheable()) {
            // Fetch the cache block to fill
            BlkType *blk = tags->findBlock(pkt);
            Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd,
                                                            (blk) ? blk->status : 0);

            Packet * busPkt = new Packet(pkt->req, temp_cmd, -1, blkSize);

            busPkt->allocate();

            busPkt->time = curTick;

            DPRINTF(Cache, "Sending an atomic %s for %x blk_addr: %x\n",
                    busPkt->cmdString(),
                    busPkt->getAddr() & (((ULL(1))<<48)-1),
                    busPkt->getAddr() & ~((Addr)blkSize - 1));

            lat = memSidePort->sendAtomic(busPkt);

            //Be sure to flip the response to a request for coherence
            if (busPkt->needsResponse()) {
                busPkt->makeAtomicResponse();
            }

            /* if (!(busPkt->flags & SATISFIED)) {
                   // blocked at a higher level, just return
                   return 0;
               }
            */
            misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;

            CacheBlk::State old_state = (blk) ? blk->status : 0;
            CacheBlk::State new_state =
                coherence->getNewState(busPkt, old_state);
            DPRINTF(Cache,
                    "Receive response: %s for blk addr %x in state %i\n",
                    busPkt->cmdString(),
                    busPkt->getAddr() & (((ULL(1))<<48)-1), old_state);
            if (old_state != new_state)
                DPRINTF(Cache, "Block for blk addr %x moving from "
                        "state %i to %i\n",
                        busPkt->getAddr() & (((ULL(1))<<48)-1),
                        old_state, new_state);

            tags->handleFill(blk, busPkt, new_state, writebacks, pkt);
            //Free the packet
            delete busPkt;

            // Handle writebacks if needed
            while (!writebacks.empty()) {
                Packet *wbPkt = writebacks.front();
                memSidePort->sendAtomic(wbPkt);
                writebacks.pop_front();
                delete wbPkt;
            }
            return lat + hitLatency;
        } else {
            return memSidePort->sendAtomic(pkt);
        }
    } else {
        // There was a cache hit.
        // Handle writebacks if needed
        while (!writebacks.empty()) {
            memSidePort->sendAtomic(writebacks.front());
            writebacks.pop_front();
        }

        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;

        return hitLatency;
    }
    fatal("Probe not handled.\n");
    return 0;
}

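/**
 * Snoop the cache without timing. Lets the coherence protocol decide
 * whether this cache must supply data and returns the hit latency if it
 * does, 0 otherwise.
 */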
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
    if (satisfy) {
        DPRINTF(Cache, "Cache snooped a %s request for addr %x and "
                "now supplying data, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);

        tags->handleSnoop(blk, new_state, pkt);
        return hitLatency;
    }
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request for addr %x, "
                "new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);
    tags->handleSnoop(blk, new_state);
    return 0;
}