/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 */

/**
 * @file
 * Cache definitions.
 */

#include <cassert>
#include <cmath>
#include <iostream>
#include <string>

#include "sim/host.hh"
#include "base/misc.hh"
#include "cpu/smt.hh"

#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/prefetcher.hh"

#include "sim/sim_exit.hh" // for SimExitEvent

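/**
 * Handle a timing-mode access from either port. CPU-side requests are
 * passed to access(); memory-side packets are either responses (handed
 * to handleResponse()) or snoops (handed to snoop() once the
 * SNOOP_COMMIT flag indicates the snoop should actually be performed).
 */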
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::
doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
{
    if (isCpuSide) {
        if (pkt->isWrite() && (pkt->req->isLocked())) {
            pkt->req->setScResult(1);
        }
        access(pkt);
    } else {
        if (pkt->isResponse())
            handleResponse(pkt);
        else {
            //Check if we should do the snoop
            if (pkt->flags & SNOOP_COMMIT)
                snoop(pkt);
        }
    }
    return true;
}

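/**
 * Handle an atomic-mode access. CPU-side requests are serviced
 * immediately via probe(); memory-side packets are responses or snoop
 * probes. Returns the access latency (currently just hitLatency; see
 * the timing todo below).
 */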
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::
doAtomicAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide) {
        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->isLocked())) {
            pkt->req->setScResult(1);
        }

        probe(pkt, true, NULL);
        //TEMP: always succeeds for now
        pkt->result = Packet::Success;
    } else {
        if (pkt->isResponse())
            handleResponse(pkt);
        else
            snoopProbe(pkt);
    }
    //@todo Fix this timing info
    return hitLatency;
}

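/**
 * Handle a functional (debug) access: probe this cache without
 * updating any state, then forward the request out the other port so
 * all copies of the data are read or updated.
 */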
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
doFunctionalAccess(Packet *pkt, bool isCpuSide)
{
    if (isCpuSide) {
        //TEMP: use CPU 0 / thread 0 for now
        pkt->req->setThreadContext(0,0);

        //Temporary solution to LL/SC
        if (pkt->isWrite() && (pkt->req->isLocked())) {
            // assert() on a string literal is always true and can never
            // fire; panic instead so this case is actually caught.
            panic("Can't handle LL/SC on functional path");
        }

        probe(pkt, false, memSidePort);
        //TEMP: always successful for now
        pkt->result = Packet::Success;
    } else {
        probe(pkt, false, cpuSidePort);
    }
}

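/**
 * Port status changes require no action from the cache; this hook is
 * intentionally empty.
 */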
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::
recvStatusChange(Port::Status status, bool isCpuSide)
{

}


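/**
 * Construct the cache and wire up its helper objects: give the tag
 * store, miss queue, coherence protocol, and prefetcher back-pointers
 * to this cache and to each other, and pre-build the packet used to
 * append invalidates to outstanding misses.
 */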
template<class TagStore, class Buffering, class Coherence>
Cache<TagStore,Buffering,Coherence>::
Cache(const std::string &_name,
      Cache<TagStore,Buffering,Coherence>::Params &params)
    : BaseCache(_name, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags), missQueue(params.missQueue),
      coherence(params.coherence), prefetcher(params.prefetcher),
      doCopy(params.doCopy), blockOnCopy(params.blockOnCopy)
{
    //FIX BUS POINTERS
    // if (params.in == NULL) {
    topLevelCache = true;
    // }
    //PLEASE FIX THIS, BUS SIZES NOT BEING USED
    tags->setCache(this, blkSize, 1/*params.out->width, params.out->clockRate*/);
    tags->setPrefetcher(prefetcher);
    missQueue->setCache(this);
    missQueue->setPrefetcher(prefetcher);
    coherence->setCache(this);
    prefetcher->setCache(this);
    prefetcher->setTags(tags);
    prefetcher->setBuffer(missQueue);
    invalidateReq = new Request((Addr) NULL, blkSize, 0);
    invalidatePkt = new Packet(invalidateReq, Packet::InvalidateReq, 0);
}

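/**
 * Register statistics for the cache and for each of its components.
 */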
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    missQueue->regStats(name());
    coherence->regStats(name());
    prefetcher->regStats(name());
}

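/**
 * Perform a cache access. Consults the prefetcher if prefetching on
 * the access stream, satisfies invalidates/upgrades directly, looks up
 * the block in the tag store, issues any writebacks generated by the
 * lookup, responds on a hit, and allocates an MSHR on a miss.
 */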
template<class TagStore, class Buffering, class Coherence>
bool
Cache<TagStore,Buffering,Coherence>::access(PacketPtr &pkt)
{
//@todo Add back in MemDebug Calls
//    MemDebug::cacheAccess(pkt);
    BlkType *blk = NULL;
    PacketList writebacks;
    int size = blkSize;
    int lat = hitLatency;
    if (prefetchAccess) {
        //We are determining prefetches on access stream, call prefetcher
        prefetcher->handleMiss(pkt, curTick);
    }
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead() && !pkt->isWrite()) {
            //Upgrade or Invalidate
            //Look into what happens if two slave caches on bus
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));

            pkt->flags |= SATISFIED;
            //Invalidates/Upgrades need no response if they get the bus
            // return MA_HIT; //@todo, return values
            return true;
        }
        blk = tags->handleAccess(pkt, lat, writebacks);
    } else {
        size = pkt->getSize();
    }
    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    /** @todo make the fast write alloc (wh64) work with coherence. */
    /** @todo Do we want to do fast writes for writebacks as well? */
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == Packet::WriteReq || pkt->cmd == Packet::WriteInvalidateReq) ) {
        // no outstanding misses, can do this
        MSHR* outstanding_miss = missQueue->findMSHR(pkt->getAddr());
        if (pkt->cmd == Packet::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fast allocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
                                   writebacks);
            ++fastWrites;
        }
    }
    while (!writebacks.empty()) {
        missQueue->doWriteback(writebacks.front());
        writebacks.pop_front();
    }
    DPRINTF(Cache, "%s %x %s blk_addr: %x\n", pkt->cmdString(),
            pkt->getAddr() & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
            pkt->getAddr() & ~((Addr)blkSize - 1));
    if (blk) {
        // Hit
        hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        // clear dirty bit if write through
        if (pkt->needsResponse())
            respond(pkt, curTick+lat);
        // return MA_HIT;
        return true;
    }

    // Miss
    if (!pkt->req->isUncacheable()) {
        misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        /** @todo Move miss count code into BaseCache */
        if (missCount) {
            --missCount;
            if (missCount == 0)
                exitSimLoop("A cache reached the maximum miss count");
        }
    }
    missQueue->handleMiss(pkt, size, curTick + hitLatency);
    // return MA_CACHE_MISS;
    return true;
}


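/**
 * Fetch the next packet to send from the miss queue, translating its
 * command to the appropriate bus command based on the current block
 * state and the coherence protocol.
 */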
template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getPacket()
{
    assert(missQueue->havePending());
    Packet * pkt = missQueue->getPacket();
    if (pkt) {
        if (!pkt->req->isUncacheable()) {
            if (pkt->cmd == Packet::HardPFReq)
                misses[Packet::HardPFReq][0/*pkt->req->getThreadNum()*/]++;
            BlkType *blk = tags->findBlock(pkt);
            Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
                                                       (blk)? blk->status : 0);
            missQueue->setBusCmd(pkt, cmd);
        }
    }

    assert(!doMasterRequest() || missQueue->havePending());
    assert(!pkt || pkt->time <= curTick);
    return pkt;
}

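/**
 * Called to indicate whether the packet was successfully sent on the
 * bus. On success the MSHR is marked in service (with special-case
 * handling to complete upgrades in place); on failure or a NACK the
 * packet's flags and original command are restored so it can retry.
 */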
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
{
    if (success && !(pkt->flags & NACKED_LINE)) {
        missQueue->markInService(pkt, mshr);
        //Temp Hack for UPGRADES
        if (pkt->cmd == Packet::UpgradeReq) {
            pkt->flags &= ~CACHE_LINE_FILL;
            BlkType *blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
            DPRINTF(Cache, "Block for blk addr %x moving from state %i to %i\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1), old_state, new_state);
            //Set the state on the upgrade
            assert(blk); // an upgrade implies the block is present
            memcpy(pkt->getPtr<uint8_t>(), blk->data, blkSize);
            PacketList writebacks;
            tags->handleFill(blk, mshr, new_state, writebacks, pkt);
            assert(writebacks.empty());
            missQueue->handleResponse(pkt, curTick + hitLatency);
        }
    } else if (pkt && !pkt->req->isUncacheable()) {
        pkt->flags &= ~NACKED_LINE;
        pkt->flags &= ~SATISFIED;
        pkt->flags &= ~SNOOP_COMMIT;
        missQueue->restoreOrigCmd(pkt);
    }
}

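/**
 * Handle a response from the memory side: on a cache fill, compute the
 * new coherence state, install the block, and issue any writebacks of
 * the victim; then hand the response to the miss queue to satisfy the
 * waiting targets.
 */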
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
{
    BlkType *blk = NULL;
    if (pkt->senderState) {
        if (pkt->result == Packet::Nacked) {
            //pkt->reinitFromRequest();
            warn("NACKs from devices not connected to the same bus not implemented\n");
            return;
        }
        if (pkt->result == Packet::BadAddress) {
            //Make the response a Bad address and send it
        }
//      MemDebug::cacheResponse(pkt);
        DPRINTF(Cache, "Handling response to %x, blk addr: %x\n",pkt->getAddr(),
                pkt->getAddr() & (((ULL(1))<<48)-1));

        if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
            blk = tags->findBlock(pkt);
            CacheBlk::State old_state = (blk) ? blk->status : 0;
            PacketList writebacks;
            CacheBlk::State new_state = coherence->getNewState(pkt,old_state);
            DPRINTF(Cache, "Block for blk addr %x moving from state %i to %i\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1), old_state, new_state);
            blk = tags->handleFill(blk, (MSHR*)pkt->senderState,
                                   new_state, writebacks, pkt);
            while (!writebacks.empty()) {
                missQueue->doWriteback(writebacks.front());
                writebacks.pop_front();
            }
        }
        missQueue->handleResponse(pkt, curTick + hitLatency);
    }
}

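/**
 * Temporarily move the block at addr out of the cache and into an MSHR
 * target list, stashing its state in the MSHR's (otherwise unused)
 * order field so it can be restored later.
 */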
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::pseudoFill(Addr addr)
{
    // Need to temporarily move this blk into MSHRs
    MSHR *mshr = missQueue->allocateTargetList(addr);
    int lat;
    PacketList dummy;
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // can overload order since it isn't used on non-pending blocks
    mshr->order = blk->status;
    // temporarily remove the block from the cache.
    tags->invalidateBlk(addr);
}

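/**
 * As pseudoFill(Addr), but reads the block into an already-allocated
 * MSHR using its ReadReq packet.
 */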
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::pseudoFill(MSHR *mshr)
{
    // Need to temporarily move this blk into MSHRs
    assert(mshr->pkt->cmd == Packet::ReadReq);
    int lat;
    PacketList dummy;
    // Read the data into the mshr
    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
    assert(dummy.empty());
    assert(mshr->pkt->flags & SATISFIED);
    // can overload order since it isn't used on non-pending blocks
    mshr->order = blk->status;
    // temporarily remove the block from the cache.
    tags->invalidateBlk(mshr->pkt->getAddr());
}


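/**
 * Fetch the next coherence-protocol packet (e.g. an invalidate) to
 * send on the bus.
 */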
template<class TagStore, class Buffering, class Coherence>
Packet *
Cache<TagStore,Buffering,Coherence>::getCoherencePacket()
{
    return coherence->getPacket();
}


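/**
 * Snoop a bus request. For a top-level cache with a coherence protocol
 * this first checks outstanding misses (NACKing or appending an
 * invalidate as needed) and the writeback buffers (supplying data from
 * a pending writeback); otherwise the coherence protocol decides
 * whether this cache must supply data and what the new block state is.
 */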
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    if (isTopLevel() && coherence->hasProtocol()) { //@todo Move this into handle bus req
        //If we find an mshr, and it is in service, we need to NACK or invalidate
        if (mshr) {
            if (mshr->inService) {
                if ((mshr->pkt->isInvalidate() || !mshr->pkt->isCacheFill())
                    && (pkt->cmd != Packet::InvalidateReq
                        && pkt->cmd != Packet::WriteInvalidateReq)) {
                    //If the outstanding request was an invalidate (upgrade,readex,..)
                    //then we need to NACK the request until we get the data.
                    //Also NACK if the outstanding request is not a cachefill (writeback)
                    assert(!(pkt->flags & SATISFIED));
                    pkt->flags |= SATISFIED;
                    pkt->flags |= NACKED_LINE;
                    ///@todo NACK's from other levels
                    //warn("NACKs from devices not connected to the same bus not implemented\n");
                    //respondToSnoop(pkt, curTick + hitLatency);
                    return;
                }
                else {
                    //The supplier will be someone else, because we are waiting for
                    //the data. This should cause this cache to be forced to go to
                    //the shared state, not the exclusive even though the shared line
                    //won't be asserted. But for now we will just invalidate ourselves
                    //and allow the other cache to go into the exclusive state.
                    //@todo Make it so a read to a pending read doesn't invalidate.
                    //@todo Make it so that a read to a pending read can't be exclusive now.

                    //Set the address so find match works
                    //panic("Don't have invalidates yet\n");
                    invalidatePkt->addrOverride(pkt->getAddr());

                    //Append the invalidate on
                    missQueue->addTarget(mshr,invalidatePkt);
                    DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n",
                            pkt->getAddr() & (((ULL(1))<<48)-1));
                    return;
                }
            }
        }
        //We also need to check the writeback buffers and handle those
        std::vector<MSHR *> writebacks;
        if (missQueue->findWrites(blk_addr, writebacks)) {
            DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n",
                    pkt->getAddr() & (((ULL(1))<<48)-1));

            //Look through writebacks for any non-uncacheable writes, use that
            for (int i=0; i<writebacks.size(); i++) {
                mshr = writebacks[i];

                if (!mshr->pkt->req->isUncacheable()) {
                    if (pkt->isRead()) {
                        //Only Upgrades don't get here
                        //Supply the data
                        assert(!(pkt->flags & SATISFIED));
                        pkt->flags |= SATISFIED;

                        //If we are in an exclusive protocol, make it ask again
                        //to get write permissions (upgrade), signal shared
                        pkt->flags |= SHARED_LINE;

                        assert(pkt->isRead());
                        Addr offset = pkt->getAddr() & (blkSize - 1);
                        assert(offset < blkSize);
                        assert(pkt->getSize() <= blkSize);
                        assert(offset + pkt->getSize() <= blkSize);
                        memcpy(pkt->getPtr<uint8_t>(),
                               mshr->pkt->getPtr<uint8_t>() + offset,
                               pkt->getSize());

                        respondToSnoop(pkt, curTick + hitLatency);
                    }

                    if (pkt->isInvalidate()) {
                        //This must be an upgrade or other cache will take ownership
                        missQueue->markInService(mshr->pkt, mshr);
                    }
                    return;
                }
            }
        }
    }
    CacheBlk::State new_state;
    bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
    if (satisfy) {
        DPRINTF(Cache, "Cache snooped a %s request for addr %x and "
                "now supplying data, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);

        tags->handleSnoop(blk, new_state, pkt);
        respondToSnoop(pkt, curTick + hitLatency);
        return;
    }
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request for addr %x, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);
    tags->handleSnoop(blk, new_state);
}

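/**
 * Handle the response to one of our own snoops. Only NACKed lines
 * would need work here, and those are currently handled through the
 * bus-blocked retry path, so this should never fire (see the assert).
 */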
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
{
    //Need to handle the response, if NACKED
    if (pkt->flags & NACKED_LINE) {
        //Need to mark it as not in service, and retry for bus
        assert(0); //Yeah, we saw a NACK come through

        //For now this should never get called, we return false when we see a NACK
        //instead; by doing this we allow the bus_blocked mechanism to handle the retry.
        //For now it retries in just 2 cycles, need to figure out how to change that.
        //Eventually we will want to also have success come in as a parameter.
        //Need to make sure that we handle the functionality that happens on successful
        //return of the sendAddr function.
    }
}

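/**
 * Invalidate the block containing addr, if present.
 */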
template<class TagStore, class Buffering, class Coherence>
void
Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr)
{
    tags->invalidateBlk(addr);
}


/**
 * Perform a probe (atomic or functional) access to this cache. On a
 * miss in atomic mode the block is fetched from the memory side and
 * filled; in functional mode (update == false) the request is
 * forwarded out the other port and merged with any overlapping data
 * in the MSHRs and write buffers.
 * @todo Fix to not assume write allocate
 */
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update, CachePort* otherSidePort)
{
//    MemDebug::cacheProbe(pkt);
    if (!pkt->req->isUncacheable()) {
        if (pkt->isInvalidate() && !pkt->isRead() && !pkt->isWrite()) {
            //Upgrade or Invalidate, satisfy it, don't forward
            DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                    pkt->getAddr() & (((ULL(1))<<48)-1),
                    pkt->getAddr() & ~((Addr)blkSize - 1));
            pkt->flags |= SATISFIED;
            return 0;
        }
    }

    PacketList writebacks;
    int lat;
    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);

    if (!blk) {
        // Need to check for outstanding misses and writes
        Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);

        // There can only be one matching outstanding miss.
        MSHR* mshr = missQueue->findMSHR(blk_addr);

        // There can be many matching outstanding writes.
        std::vector<MSHR*> writes;
        missQueue->findWrites(blk_addr, writes);

        if (!update) {
            otherSidePort->sendFunctional(pkt);

            // Check for data in MSHR and writebuffer.
            if (mshr) {
                warn("Found outstanding miss on a non-update probe");
                MSHR::TargetList *targets = mshr->getTargetList();
                MSHR::TargetList::iterator i = targets->begin();
                MSHR::TargetList::iterator end = targets->end();
                for (; i != end; ++i) {
                    Packet * target = *i;
                    // If the target contains data, and it overlaps the
                    // probed request, need to update data
                    if (target->isWrite() && target->intersect(pkt)) {
                        uint8_t* pkt_data;
                        uint8_t* write_data;
                        int data_size;
                        if (target->getAddr() < pkt->getAddr()) {
                            int offset = pkt->getAddr() - target->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>();
                            write_data = target->getPtr<uint8_t>() + offset;
                            data_size = target->getSize() - offset;
                            assert(data_size > 0);
                            if (data_size > pkt->getSize())
                                data_size = pkt->getSize();
                        } else {
                            int offset = target->getAddr() - pkt->getAddr();
                            pkt_data = pkt->getPtr<uint8_t>() + offset;
                            write_data = target->getPtr<uint8_t>();
                            data_size = pkt->getSize() - offset;
                            // the original asserted data_size > pkt->getSize(),
                            // which can never hold here; the overlap just needs
                            // to be non-empty
                            assert(data_size > 0);
                            if (data_size > target->getSize())
                                data_size = target->getSize();
                        }

                        if (pkt->isWrite()) {
                            memcpy(pkt_data, write_data, data_size);
                        } else {
                            memcpy(write_data, pkt_data, data_size);
                        }
                    }
                }
            }
            for (int i = 0; i < writes.size(); ++i) {
                Packet * write = writes[i]->pkt;
                if (write->intersect(pkt)) {
                    warn("Found outstanding write on a non-update probe");
                    uint8_t* pkt_data;
                    uint8_t* write_data;
                    int data_size;
                    if (write->getAddr() < pkt->getAddr()) {
                        int offset = pkt->getAddr() - write->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>();
                        write_data = write->getPtr<uint8_t>() + offset;
                        data_size = write->getSize() - offset;
                        assert(data_size > 0);
                        if (data_size > pkt->getSize())
                            data_size = pkt->getSize();
                    } else {
                        int offset = write->getAddr() - pkt->getAddr();
                        pkt_data = pkt->getPtr<uint8_t>() + offset;
                        write_data = write->getPtr<uint8_t>();
                        data_size = pkt->getSize() - offset;
                        // see note above: overlap must be non-empty
                        assert(data_size > 0);
                        if (data_size > write->getSize())
                            data_size = write->getSize();
                    }

                    if (pkt->isWrite()) {
                        memcpy(pkt_data, write_data, data_size);
                    } else {
                        memcpy(write_data, pkt_data, data_size);
                    }

                }
            }
            return 0;
        } else {
            // update the cache state and statistics
            if (mshr || !writes.empty()){
                // Can't handle it, return request unsatisfied.
                panic("Atomic access ran into outstanding MSHR's or WB's!");
            }
            if (!pkt->req->isUncacheable()) {
                // Fetch the cache block to fill
                BlkType *blk = tags->findBlock(pkt);
                Packet::Command temp_cmd = coherence->getBusCmd(pkt->cmd,
                                                                (blk)? blk->status : 0);

                Packet * busPkt = new Packet(pkt->req,temp_cmd, -1, blkSize);

                busPkt->allocate();

                busPkt->time = curTick;

                lat = memSidePort->sendAtomic(busPkt);

                //Be sure to flip the response to a request for coherence
                if (busPkt->needsResponse()) {
                    busPkt->makeAtomicResponse();
                }

/*              if (!(busPkt->flags & SATISFIED)) {
                    // blocked at a higher level, just return
                    return 0;
                }
*/
                misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;

                CacheBlk::State old_state = (blk) ? blk->status : 0;
                tags->handleFill(blk, busPkt,
                                 coherence->getNewState(busPkt, old_state),
                                 writebacks, pkt);
                // Handle writebacks if needed
                while (!writebacks.empty()){
                    memSidePort->sendAtomic(writebacks.front());
                    writebacks.pop_front();
                }
                return lat + hitLatency;
            } else {
                return memSidePort->sendAtomic(pkt);
            }
        }
    } else {
        // There was a cache hit.
        // Handle writebacks if needed
        while (!writebacks.empty()){
            memSidePort->sendAtomic(writebacks.front());
            writebacks.pop_front();
        }

        if (update) {
            hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
        } else if (pkt->isWrite()) {
            // Still need to change data in all locations.
            otherSidePort->sendFunctional(pkt);
        }
        return curTick + lat;
    }
    fatal("Probe not handled.\n");
    return 0;
}

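/**
 * Snoop a bus request in atomic mode: let the coherence protocol
 * decide whether to supply data and what the new block state is, and
 * return the latency of supplying data (or 0 if no data is supplied).
 */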
template<class TagStore, class Buffering, class Coherence>
Tick
Cache<TagStore,Buffering,Coherence>::snoopProbe(PacketPtr &pkt)
{
    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    BlkType *blk = tags->findBlock(pkt);
    MSHR *mshr = missQueue->findMSHR(blk_addr);
    CacheBlk::State new_state = 0;
    bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
    if (satisfy) {
        DPRINTF(Cache, "Cache snooped a %s request for addr %x and "
                "now supplying data, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);

        tags->handleSnoop(blk, new_state, pkt);
        return hitLatency;
    }
    if (blk)
        DPRINTF(Cache, "Cache snooped a %s request for addr %x, new state is %i\n",
                pkt->cmdString(), blk_addr, new_state);
    tags->handleSnoop(blk, new_state);
    return 0;
}