src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 * Dave Greene
30 * Nathan Binkert
31 * Steve Reinhardt
32 * Ron Dreslinski
33 */
34
35 /**
36 * @file
37 * Cache definitions.
38 */
39
40 #include "base/fast_alloc.hh"
41 #include "base/misc.hh"
42 #include "base/range.hh"
43 #include "base/types.hh"
44 #include "mem/cache/blk.hh"
45 #include "mem/cache/cache.hh"
46 #include "mem/cache/mshr.hh"
47 #include "mem/cache/prefetch/base.hh"
48 #include "sim/sim_exit.hh"
49
50 template<class TagStore>
51 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
52 : BaseCache(p),
53 tags(tags),
54 prefetcher(pf),
55 doFastWrites(true),
56 prefetchOnAccess(p->prefetch_on_access)
57 {
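// tempBlock provides temporary storage for fills that arrive when no
// replaceable block is available (see handleFill()); it is never
// inserted into the tag store.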
58 tempBlock = new BlkType();
59 tempBlock->data = new uint8_t[blkSize];
60
61 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
62 "CpuSidePort");
63 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
64 "MemSidePort");
65 cpuSidePort->setOtherPort(memSidePort);
66 memSidePort->setOtherPort(cpuSidePort);
67
68 tags->setCache(this);
69 if (prefetcher)
70 prefetcher->setCache(this);
71 }
72
73 template<class TagStore>
74 void
75 Cache<TagStore>::regStats()
76 {
77 BaseCache::regStats();
78 tags->regStats(name());
79 if (prefetcher)
80 prefetcher->regStats(name());
81 }
82
83 template<class TagStore>
84 Port *
85 Cache<TagStore>::getPort(const std::string &if_name, int idx)
86 {
87 if (if_name == "" || if_name == "cpu_side") {
88 return cpuSidePort;
89 } else if (if_name == "mem_side") {
90 return memSidePort;
91 } else if (if_name == "functional") {
92 CpuSidePort *funcPort =
93 new CpuSidePort(name() + "-cpu_side_funcport", this,
94 "CpuSideFuncPort");
95 funcPort->setOtherPort(memSidePort);
96 return funcPort;
97 } else {
98 panic("Port name %s unrecognized\n", if_name);
99 }
100 }
101
102 template<class TagStore>
103 void
104 Cache<TagStore>::deletePortRefs(Port *p)
105 {
106 if (cpuSidePort == p || memSidePort == p)
107 panic("Can only delete functional ports\n");
108
109 delete p;
110 }
111
112
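// Perform an atomic swap / compare-and-swap on a cached block: capture the
// packet's write data, copy the current block contents back into the packet,
// and overwrite the block (marking it dirty) either unconditionally (swap)
// or only if the comparison value matches (conditional swap).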
113 template<class TagStore>
114 void
115 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
116 {
117 uint64_t overwrite_val;
118 bool overwrite_mem;
119 uint64_t condition_val64;
120 uint32_t condition_val32;
121
122 int offset = tags->extractBlkOffset(pkt->getAddr());
123 uint8_t *blk_data = blk->data + offset;
124
125 assert(sizeof(uint64_t) >= pkt->getSize());
126
127 overwrite_mem = true;
128 // keep a copy of our possible write value, and copy what is at the
129 // memory address into the packet
130 pkt->writeData((uint8_t *)&overwrite_val);
131 pkt->setData(blk_data);
132
133 if (pkt->req->isCondSwap()) {
134 if (pkt->getSize() == sizeof(uint64_t)) {
135 condition_val64 = pkt->req->getExtraData();
136 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
137 sizeof(uint64_t));
138 } else if (pkt->getSize() == sizeof(uint32_t)) {
139 condition_val32 = (uint32_t)pkt->req->getExtraData();
140 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
141 sizeof(uint32_t));
142 } else
143 panic("Invalid size for conditional read/write\n");
144 }
145
146 if (overwrite_mem) {
147 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
148 blk->status |= BlkDirty;
149 }
150 }
151
152
153 template<class TagStore>
154 void
155 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
156 {
157 assert(blk);
158 // Occasionally this is not true... if we are a lower-level cache
159 // satisfying a string of Read and ReadEx requests from
160 // upper-level caches, a Read will mark the block as shared but we
161 // can satisfy a following ReadEx anyway since we can rely on the
162 // Read requester(s) to have buffered the ReadEx snoop and to
163 // invalidate their blocks after receiving them.
164 // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
165 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
166
167 // Check RMW operations first since both isRead() and
168 // isWrite() will be true for them
169 if (pkt->cmd == MemCmd::SwapReq) {
170 cmpAndSwap(blk, pkt);
171 } else if (pkt->isWrite()) {
172 blk->status |= BlkDirty;
173 if (blk->checkWrite(pkt)) {
174 pkt->writeDataToBlock(blk->data, blkSize);
175 }
176 } else if (pkt->isRead()) {
177 if (pkt->isLLSC()) {
178 blk->trackLoadLocked(pkt);
179 }
180 pkt->setDataFromBlock(blk->data, blkSize);
181 if (pkt->getSize() == blkSize) {
182 // special handling for coherent block requests from
183 // upper-level caches
184 if (pkt->needsExclusive()) {
185 // on ReadExReq we give up our copy
186 tags->invalidateBlk(blk);
187 } else {
188 // on ReadReq we create shareable copies here and in
189 // the requester
190 pkt->assertShared();
191 blk->status &= ~BlkWritable;
192 }
193 }
194 } else {
195 // Not a read or write... must be an upgrade.  It's OK
196 // to just ack those as long as we have an exclusive
197 // copy at this level.
198 assert(pkt->cmd == MemCmd::UpgradeReq);
199 tags->invalidateBlk(blk);
200 }
201 }
202
203
204 /////////////////////////////////////////////////////
205 //
206 // MSHR helper functions
207 //
208 /////////////////////////////////////////////////////
209
210
211 template<class TagStore>
212 void
213 Cache<TagStore>::markInService(MSHR *mshr)
214 {
215 markInServiceInternal(mshr);
216 #if 0
217 if (mshr->originalCmd == MemCmd::HardPFReq) {
218 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
219 name());
220 //Also clear pending if need be
221 if (!prefetcher->havePending())
222 {
223 deassertMemSideBusRequest(Request_PF);
224 }
225 }
226 #endif
227 }
228
229
230 template<class TagStore>
231 void
232 Cache<TagStore>::squash(int threadNum)
233 {
234 bool unblock = false;
235 BlockedCause cause = NUM_BLOCKED_CAUSES;
236
237 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
238 noTargetMSHR = NULL;
239 unblock = true;
240 cause = Blocked_NoTargets;
241 }
242 if (mshrQueue.isFull()) {
243 unblock = true;
244 cause = Blocked_NoMSHRs;
245 }
246 mshrQueue.squash(threadNum);
247 if (unblock && !mshrQueue.isFull()) {
248 clearBlocked(cause);
249 }
250 }
251
252 /////////////////////////////////////////////////////
253 //
254 // Access path: requests coming in from the CPU side
255 //
256 /////////////////////////////////////////////////////
257
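// Try to satisfy a CPU-side request in place.  Looks the block up in the tag
// store and returns true if the request can be handled here (a hit, an
// absorbed writeback, or a store-conditional that fails on a complete miss);
// 'lat' is set to the access latency and any victim writebacks generated
// during block allocation are appended to 'writebacks'.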
258 template<class TagStore>
259 bool
260 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
261 int &lat, PacketList &writebacks)
262 {
263 if (pkt->req->isUncacheable()) {
264 blk = NULL;
265 lat = hitLatency;
266 return false;
267 }
268
269 blk = tags->accessBlock(pkt->getAddr(), lat);
270
271 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
272 pkt->req->isInstFetch() ? " (ifetch)" : "",
273 pkt->getAddr(), (blk) ? "hit" : "miss");
274
275 if (blk != NULL) {
276
277 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
278 // OK to satisfy access
279 hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
280 satisfyCpuSideRequest(pkt, blk);
281 return true;
282 }
283 }
284
285 // Can't satisfy access normally... either no block (blk == NULL)
286 // or have block but need exclusive & only have shared.
287
288 // Writeback handling is special case. We can write the block
289 // into the cache without having a writeable copy (or any copy at
290 // all).
291 if (pkt->cmd == MemCmd::Writeback) {
292 assert(blkSize == pkt->getSize());
293 if (blk == NULL) {
294 // need to do a replacement
295 blk = allocateBlock(pkt->getAddr(), writebacks);
296 if (blk == NULL) {
297 // no replaceable block available, give up.
298 // writeback will be forwarded to next level.
299 incMissCount(pkt);
300 return false;
301 }
302 tags->insertBlock(pkt->getAddr(), blk);
303 blk->status = BlkValid | BlkReadable;
304 }
305 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
306 blk->status |= BlkDirty;
307 // nothing else to do; writeback doesn't expect response
308 assert(!pkt->needsResponse());
309 hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
310 return true;
311 }
312
313 incMissCount(pkt);
314
315 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
316 // complete miss on store conditional... just give up now
317 pkt->req->setExtraData(0);
318 return true;
319 }
320
321 return false;
322 }
323
324
325 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
326 {
327 Packet::SenderState *prevSenderState;
328 int prevSrc;
329 #ifndef NDEBUG
330 BaseCache *cache;
331 #endif
332 public:
333 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
334 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
335 #ifndef NDEBUG
336 , cache(_cache)
337 #endif
338 {}
339 void restore(Packet *pkt, BaseCache *_cache)
340 {
341 assert(_cache == cache);
342 pkt->senderState = prevSenderState;
343 pkt->setDest(prevSrc);
344 }
345 };
346
347
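// Main entry point for timing-mode packets arriving on the CPU-side port:
// forwards cache-to-cache responses downward, handles mem-inhibited and
// uncacheable requests, satisfies hits, and hands misses to the MSHR queue
// or write buffer.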
348 template<class TagStore>
349 bool
350 Cache<TagStore>::timingAccess(PacketPtr pkt)
351 {
352 //@todo Add back in MemDebug Calls
353 // MemDebug::cacheAccess(pkt);
354
355 // we charge hitLatency for doing just about anything here
356 Tick time = curTick + hitLatency;
357
358 if (pkt->isResponse()) {
359 // must be cache-to-cache response from upper to lower level
360 ForwardResponseRecord *rec =
361 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
362 assert(rec != NULL);
363 rec->restore(pkt, this);
364 delete rec;
365 memSidePort->respond(pkt, time);
366 return true;
367 }
368
369 assert(pkt->isRequest());
370
371 if (pkt->memInhibitAsserted()) {
372 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
373 pkt->getAddr());
374 assert(!pkt->req->isUncacheable());
375 // Special tweak for multilevel coherence: snoop downward here
376 // on invalidates since there may be other caches below here
377 // that have shared copies. Not necessary if we know that
378 // supplier had exclusive copy to begin with.
379 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
380 Packet *snoopPkt = new Packet(pkt, true); // clear flags
381 snoopPkt->setExpressSnoop();
382 snoopPkt->assertMemInhibit();
383 memSidePort->sendTiming(snoopPkt);
384 // main memory will delete snoopPkt
385 }
386 // since we're the official target but we aren't responding,
387 // delete the packet now.
388 delete pkt;
389 return true;
390 }
391
392 if (pkt->req->isUncacheable()) {
393 // writes go in write buffer, reads use MSHR
394 if (pkt->isWrite() && !pkt->isRead()) {
395 allocateWriteBuffer(pkt, time, true);
396 } else {
397 allocateUncachedReadBuffer(pkt, time, true);
398 }
399 assert(pkt->needsResponse()); // else we should delete it here??
400 return true;
401 }
402
403 int lat = hitLatency;
404 BlkType *blk = NULL;
405 PacketList writebacks;
406
407 bool satisfied = access(pkt, blk, lat, writebacks);
408
409 #if 0
410 /** @todo make the fast write alloc (wh64) work with coherence. */
411
412 // If this is a block size write/hint (WH64) allocate the block here
413 // if the coherence protocol allows it.
414 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
415 (pkt->cmd == MemCmd::WriteReq
416 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
417 // no outstanding misses, can do this
418 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
419 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
420 if (outstanding_miss) {
421 warn("WriteInv doing a fast allocate "
422 "with an outstanding miss to the same address\n");
423 }
424 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
425 writebacks);
426 ++fastWrites;
427 }
428 }
429 #endif
430
431 // track time of availability of next prefetch, if any
432 Tick next_pf_time = 0;
433
434 bool needsResponse = pkt->needsResponse();
435
436 if (satisfied) {
437 if (needsResponse) {
438 pkt->makeTimingResponse();
439 cpuSidePort->respond(pkt, curTick+lat);
440 } else {
441 delete pkt;
442 }
443
444 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
445 if (blk)
446 blk->status &= ~BlkHWPrefetched;
447 next_pf_time = prefetcher->notify(pkt, time);
448 }
449 } else {
450 // miss
451
452 Addr blk_addr = blockAlign(pkt->getAddr());
453 MSHR *mshr = mshrQueue.findMatch(blk_addr);
454
455 if (mshr) {
456 // MSHR hit
457 //@todo remove hw_pf here
458 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
459 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
460 mshr->threadNum = -1;
461 }
462 mshr->allocateTarget(pkt, time, order++);
463 if (mshr->getNumTargets() == numTarget) {
464 noTargetMSHR = mshr;
465 setBlocked(Blocked_NoTargets);
466 // need to be careful with this... if this mshr isn't
467 // ready yet (i.e. time > curTick), we don't want to
468 // move it ahead of mshrs that are ready
469 // mshrQueue.moveToFront(mshr);
470 }
471 } else {
472 // no MSHR
473 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
474 // always mark as cache fill for now... if we implement
475 // no-write-allocate or bypass accesses this will have to
476 // be changed.
477 if (pkt->cmd == MemCmd::Writeback) {
478 allocateWriteBuffer(pkt, time, true);
479 } else {
480 if (blk && blk->isValid()) {
481 // If we have a write miss to a valid block, we
482 // need to mark the block non-readable. Otherwise
483 // if we allow reads while there's an outstanding
484 // write miss, the read could return stale data
485 // out of the cache block... a more aggressive
486 // system could detect the overlap (if any) and
487 // forward data out of the MSHRs, but we don't do
488 // that yet. Note that we do need to leave the
489 // block valid so that it stays in the cache, in
490 // case we get an upgrade response (and hence no
491 // new data) when the write miss completes.
492 // As long as CPUs do proper store/load forwarding
493 // internally, and have a sufficiently weak memory
494 // model, this is probably unnecessary, but at some
495 // point it must have seemed like we needed it...
496 assert(pkt->needsExclusive() && !blk->isWritable());
497 blk->status &= ~BlkReadable;
498 }
499
500 allocateMissBuffer(pkt, time, true);
501 }
502
503 if (prefetcher) {
504 next_pf_time = prefetcher->notify(pkt, time);
505 }
506 }
507 }
508
509 if (next_pf_time != 0)
510 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
511
512 // copy writebacks to write buffer
513 while (!writebacks.empty()) {
514 PacketPtr wbPkt = writebacks.front();
515 allocateWriteBuffer(wbPkt, time, true);
516 writebacks.pop_front();
517 }
518
519 return true;
520 }
521
522
523 // See comment in cache.hh.
524 template<class TagStore>
525 PacketPtr
526 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
527 bool needsExclusive)
528 {
529 bool blkValid = blk && blk->isValid();
530
531 if (cpu_pkt->req->isUncacheable()) {
532 assert(blk == NULL);
533 return NULL;
534 }
535
536 if (!blkValid && (cpu_pkt->cmd == MemCmd::Writeback ||
537 cpu_pkt->cmd == MemCmd::UpgradeReq)) {
538 // Writebacks that weren't allocated in access() and upgrades
539 // from upper-level caches that missed completely just go
540 // through.
541 return NULL;
542 }
543
544 assert(cpu_pkt->needsResponse());
545
546 MemCmd cmd;
547 // @TODO make useUpgrades a parameter.
548 // Note that ownership protocols require upgrade, otherwise a
549 // write miss on a shared owned block will generate a ReadExcl,
550 // which will clobber the owned copy.
551 const bool useUpgrades = true;
552 if (blkValid && useUpgrades) {
553 // only reason to be here is that blk is shared
554 // (read-only) and we need exclusive
555 assert(needsExclusive && !blk->isWritable());
556 cmd = MemCmd::UpgradeReq;
557 } else {
558 // block is invalid
559 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
560 }
561 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
562
563 pkt->allocate();
564 return pkt;
565 }
566
567
568 template<class TagStore>
569 Tick
570 Cache<TagStore>::atomicAccess(PacketPtr pkt)
571 {
572 int lat = hitLatency;
573
574 // @TODO: make this a parameter
575 bool last_level_cache = false;
576
577 if (pkt->memInhibitAsserted()) {
578 assert(!pkt->req->isUncacheable());
579 // have to invalidate ourselves and any lower caches even if
580 // upper cache will be responding
581 if (pkt->isInvalidate()) {
582 BlkType *blk = tags->findBlock(pkt->getAddr());
583 if (blk && blk->isValid()) {
584 tags->invalidateBlk(blk);
585 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
586 pkt->cmdString(), pkt->getAddr());
587 }
588 if (!last_level_cache) {
589 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
590 pkt->cmdString(), pkt->getAddr());
591 lat += memSidePort->sendAtomic(pkt);
592 }
593 } else {
594 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
595 pkt->cmdString(), pkt->getAddr());
596 }
597
598 return lat;
599 }
600
601 // should assert here that there are no outstanding MSHRs or
602 // writebacks... that would mean that someone used an atomic
603 // access in timing mode
604
605 BlkType *blk = NULL;
606 PacketList writebacks;
607
608 if (!access(pkt, blk, lat, writebacks)) {
609 // MISS
610 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
611
612 bool is_forward = (bus_pkt == NULL);
613
614 if (is_forward) {
615 // just forwarding the same request to the next level
616 // no local cache operation involved
617 bus_pkt = pkt;
618 }
619
620 DPRINTF(Cache, "Sending an atomic %s for %x\n",
621 bus_pkt->cmdString(), bus_pkt->getAddr());
622
623 #if TRACING_ON
624 CacheBlk::State old_state = blk ? blk->status : 0;
625 #endif
626
627 lat += memSidePort->sendAtomic(bus_pkt);
628
629 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
630 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
631
632 assert(!bus_pkt->wasNacked());
633
634 // If packet was a forward, the response (if any) is already
635 // in place in the bus_pkt == pkt structure, so we don't need
636 // to do anything. Otherwise, use the separate bus_pkt to
637 // generate response to pkt and then delete it.
638 if (!is_forward) {
639 if (pkt->needsResponse()) {
640 assert(bus_pkt->isResponse());
641 if (bus_pkt->isError()) {
642 pkt->makeAtomicResponse();
643 pkt->copyError(bus_pkt);
644 } else if (bus_pkt->isRead() ||
645 bus_pkt->cmd == MemCmd::UpgradeResp) {
646 // we're updating cache state to allow us to
647 // satisfy the upstream request from the cache
648 blk = handleFill(bus_pkt, blk, writebacks);
649 satisfyCpuSideRequest(pkt, blk);
650 } else {
651 // we're satisfying the upstream request without
652 // modifying cache state, e.g., a write-through
653 pkt->makeAtomicResponse();
654 }
655 }
656 delete bus_pkt;
657 }
658 }
659
660 // Note that we don't invoke the prefetcher at all in atomic mode.
661 // It's not clear how to do it properly, particularly for
662 // prefetchers that aggressively generate prefetch candidates and
663 // rely on bandwidth contention to throttle them; these will tend
664 // to pollute the cache in atomic mode since there is no bandwidth
665 // contention. If we ever do want to enable prefetching in atomic
666 // mode, though, this is the place to do it... see timingAccess()
667 // for an example (though we'd want to issue the prefetch(es)
668 // immediately rather than calling requestMemSideBus() as we do
669 // there).
670
671 // Handle writebacks if needed
672 while (!writebacks.empty()){
673 PacketPtr wbPkt = writebacks.front();
674 memSidePort->sendAtomic(wbPkt);
675 writebacks.pop_front();
676 delete wbPkt;
677 }
678
679 // We now have the block one way or another (hit or completed miss)
680
681 if (pkt->needsResponse()) {
682 pkt->makeAtomicResponse();
683 }
684
685 return lat;
686 }
687
688
689 template<class TagStore>
690 void
691 Cache<TagStore>::functionalAccess(PacketPtr pkt,
692 CachePort *incomingPort,
693 CachePort *otherSidePort)
694 {
695 Addr blk_addr = blockAlign(pkt->getAddr());
696 BlkType *blk = tags->findBlock(pkt->getAddr());
697
698 pkt->pushLabel(name());
699
700 CacheBlkPrintWrapper cbpw(blk);
701 bool done =
702 (blk && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data))
703 || incomingPort->checkFunctional(pkt)
704 || mshrQueue.checkFunctional(pkt, blk_addr)
705 || writeBuffer.checkFunctional(pkt, blk_addr)
706 || otherSidePort->checkFunctional(pkt);
707
708 // We're leaving the cache, so pop cache->name() label
709 pkt->popLabel();
710
711 if (!done) {
712 otherSidePort->sendFunctional(pkt);
713 }
714 }
715
716
717 /////////////////////////////////////////////////////
718 //
719 // Response handling: responses from the memory side
720 //
721 /////////////////////////////////////////////////////
722
723
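// Handle a response arriving on the memory-side port: fill the block if this
// is a cache fill, then service each of the MSHR's targets (CPU requests,
// prefetches, deferred snoops) with critical-word-first timing, and finally
// retire the MSHR or re-arm it for its deferred targets.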
724 template<class TagStore>
725 void
726 Cache<TagStore>::handleResponse(PacketPtr pkt)
727 {
728 Tick time = curTick + hitLatency;
729 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
730 bool is_error = pkt->isError();
731
732 assert(mshr);
733
734 if (pkt->wasNacked()) {
735 //pkt->reinitFromRequest();
736 warn("NACKs from devices not connected to the same bus "
737 "not implemented\n");
738 return;
739 }
740 if (is_error) {
741 DPRINTF(Cache, "Cache received packet with error for address %x, "
742 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
743 }
744
745 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
746
747 MSHRQueue *mq = mshr->queue;
748 bool wasFull = mq->isFull();
749
750 if (mshr == noTargetMSHR) {
751 // we always clear at least one target
752 clearBlocked(Blocked_NoTargets);
753 noTargetMSHR = NULL;
754 }
755
756 // Initial target is used just for stats
757 MSHR::Target *initial_tgt = mshr->getTarget();
758 BlkType *blk = tags->findBlock(pkt->getAddr());
759 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
760 Tick miss_latency = curTick - initial_tgt->recvTime;
761 PacketList writebacks;
762
763 if (pkt->req->isUncacheable()) {
764 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
765 miss_latency;
766 } else {
767 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
768 miss_latency;
769 }
770
771 bool is_fill = !mshr->isForward &&
772 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
773
774 if (is_fill && !is_error) {
775 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
776 pkt->getAddr());
777
778 // give mshr a chance to do some dirty work
779 mshr->handleFill(pkt, blk);
780
781 blk = handleFill(pkt, blk, writebacks);
782 assert(blk != NULL);
783 }
784
785 // First offset for critical word first calculations
786 int initial_offset = 0;
787
788 if (mshr->hasTargets()) {
789 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
790 }
791
792 while (mshr->hasTargets()) {
793 MSHR::Target *target = mshr->getTarget();
794
795 switch (target->source) {
796 case MSHR::Target::FromCPU:
797 Tick completion_time;
798 if (is_fill) {
799 satisfyCpuSideRequest(target->pkt, blk);
800 // How many bytes past the first request is this one
801 int transfer_offset =
802 target->pkt->getOffset(blkSize) - initial_offset;
803 if (transfer_offset < 0) {
804 transfer_offset += blkSize;
805 }
806
807 // If critical word (no offset) return first word time
808 completion_time = tags->getHitLatency() +
809 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
810
811 assert(!target->pkt->req->isUncacheable());
812 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
813 completion_time - target->recvTime;
814 } else {
815 // not a cache fill, just forwarding response
816 completion_time = tags->getHitLatency() + pkt->finishTime;
817 if (pkt->isRead() && !is_error) {
818 target->pkt->setData(pkt->getPtr<uint8_t>());
819 }
820 }
821 target->pkt->makeTimingResponse();
822 // if this packet is an error copy that to the new packet
823 if (is_error)
824 target->pkt->copyError(pkt);
825 if (pkt->isInvalidate()) {
826 // If intermediate cache got ReadRespWithInvalidate,
827 // propagate that. Response should not have
828 // isInvalidate() set otherwise.
829 assert(target->pkt->cmd == MemCmd::ReadResp);
830 assert(pkt->cmd == MemCmd::ReadRespWithInvalidate);
831 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
832 }
833 cpuSidePort->respond(target->pkt, completion_time);
834 break;
835
836 case MSHR::Target::FromPrefetcher:
837 assert(target->pkt->cmd == MemCmd::HardPFReq);
838 if (blk)
839 blk->status |= BlkHWPrefetched;
840 delete target->pkt->req;
841 delete target->pkt;
842 break;
843
844 case MSHR::Target::FromSnoop:
845 // I don't believe that a snoop can be in an error state
846 assert(!is_error);
847 // response to snoop request
848 DPRINTF(Cache, "processing deferred snoop...\n");
849 handleSnoop(target->pkt, blk, true, true,
850 mshr->pendingInvalidate || pkt->isInvalidate());
851 break;
852
853 default:
854 panic("Illegal target->source enum %d\n", target->source);
855 }
856
857 mshr->popTarget();
858 }
859
860 if (pkt->isInvalidate()) {
861 tags->invalidateBlk(blk);
862 }
863
864 if (mshr->promoteDeferredTargets()) {
865 // avoid later read getting stale data while write miss is
866 // outstanding.. see comment in timingAccess()
867 blk->status &= ~BlkReadable;
868 MSHRQueue *mq = mshr->queue;
869 mq->markPending(mshr);
870 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
871 } else {
872 mq->deallocate(mshr);
873 if (wasFull && !mq->isFull()) {
874 clearBlocked((BlockedCause)mq->index);
875 }
876 }
877
878 // copy writebacks to write buffer
879 while (!writebacks.empty()) {
880 PacketPtr wbPkt = writebacks.front();
881 allocateWriteBuffer(wbPkt, time, true);
882 writebacks.pop_front();
883 }
884 // if we used temp block, clear it out
885 if (blk == tempBlock) {
886 if (blk->isDirty()) {
887 allocateWriteBuffer(writebackBlk(blk), time, true);
888 }
889 tags->invalidateBlk(blk);
890 }
891
892 delete pkt;
893 }
894
895
896
897
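// Create a writeback packet for a dirty block: the block's data is copied
// into the new packet and the block's dirty bit is cleared; the caller is
// responsible for queuing the packet.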
898 template<class TagStore>
899 PacketPtr
900 Cache<TagStore>::writebackBlk(BlkType *blk)
901 {
902 assert(blk && blk->isValid() && blk->isDirty());
903
904 writebacks[0/*pkt->req->threadId()*/]++;
905
906 Request *writebackReq =
907 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
908 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
909 writeback->allocate();
910 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
911
912 blk->status &= ~BlkDirty;
913 return writeback;
914 }
915
916
917 template<class TagStore>
918 typename Cache<TagStore>::BlkType*
919 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
920 {
921 BlkType *blk = tags->findVictim(addr, writebacks);
922
923 if (blk->isValid()) {
924 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
925 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
926 if (repl_mshr) {
927 // must be an outstanding upgrade request on block
928 // we're about to replace...
929 assert(!blk->isWritable());
930 assert(repl_mshr->needsExclusive());
931 // too hard to replace block with transient state
932 // allocation failed, block not inserted
933 return NULL;
934 } else {
935 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
936 repl_addr, addr,
937 blk->isDirty() ? "writeback" : "clean");
938
939 if (blk->isDirty()) {
940 // Save writeback packet for handling by caller
941 writebacks.push_back(writebackBlk(blk));
942 }
943 }
944 }
945
946 return blk;
947 }
948
949
950 // Note that the reason we return a list of writebacks rather than
951 // inserting them directly in the write buffer is that this function
952 // is called by both atomic and timing-mode accesses, and in atomic
953 // mode we don't mess with the write buffer (we just perform the
954 // writebacks atomically once the original request is complete).
955 template<class TagStore>
956 typename Cache<TagStore>::BlkType*
957 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
958 PacketList &writebacks)
959 {
960 Addr addr = pkt->getAddr();
961 #if TRACING_ON
962 CacheBlk::State old_state = blk ? blk->status : 0;
963 #endif
964
965 if (blk == NULL) {
966 // better have read new data...
967 assert(pkt->hasData());
968 // need to do a replacement
969 blk = allocateBlock(addr, writebacks);
970 if (blk == NULL) {
971 // No replaceable block... just use temporary storage to
972 // complete the current request and then get rid of it
973 assert(!tempBlock->isValid());
974 blk = tempBlock;
975 tempBlock->set = tags->extractSet(addr);
976 tempBlock->tag = tags->extractTag(addr);
977 DPRINTF(Cache, "using temp block for %x\n", addr);
978 } else {
979 tags->insertBlock(addr, blk);
980 }
981 } else {
982 // existing block... probably an upgrade
983 assert(blk->tag == tags->extractTag(addr));
984 // either we're getting new data or the block should already be valid
985 assert(pkt->hasData() || blk->isValid());
986 }
987
988 if (!pkt->sharedAsserted()) {
989 blk->status = BlkValid | BlkReadable | BlkWritable;
990 } else {
991 assert(!pkt->needsExclusive());
992 blk->status = BlkValid | BlkReadable;
993 }
994
995 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
996 addr, old_state, blk->status);
997
998 // if we got new data, copy it in
999 if (pkt->isRead()) {
1000 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1001 }
1002
1003 blk->whenReady = pkt->finishTime;
1004
1005 return blk;
1006 }
1007
1008
1009 /////////////////////////////////////////////////////
1010 //
1011 // Snoop path: requests coming in from the memory side
1012 //
1013 /////////////////////////////////////////////////////
1014
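// Supply data for a snooped request in timing mode by sending a
// cache-to-cache response on the memory-side port.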
1015 template<class TagStore>
1016 void
1017 Cache<TagStore>::
1018 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1019 bool already_copied, bool pending_inval)
1020 {
1021 // timing-mode snoop responses require a new packet, unless we
1022 // already made a copy...
1023 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt, true);
1024 if (!req_pkt->isInvalidate()) {
1025 // note that we're ignoring the shared flag on req_pkt... it's
1026 // basically irrelevant, as we'll always assert shared unless
1027 // it's an exclusive request, in which case the shared line
1028 // should never be asserted
1029 pkt->assertShared();
1030 }
1031 pkt->allocate();
1032 pkt->makeTimingResponse();
1033 if (pkt->isRead()) {
1034 pkt->setDataFromBlock(blk_data, blkSize);
1035 }
1036 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1037 // Assume we defer a response to a read from a far-away cache
1038 // A, then later defer a ReadExcl from a cache B on the same
1039 // bus as us. We'll assert MemInhibit in both cases, but in
1040 // the latter case MemInhibit will keep the invalidation from
1041 // reaching cache A. This special response tells cache A that
1042 // it gets the block to satisfy its read, but must immediately
1043 // invalidate it.
1044 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1045 }
1046 memSidePort->respond(pkt, curTick + hitLatency);
1047 }
1048
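// Common snoop handling for timing and atomic modes: forward the snoop to
// any upper-level caches first, then update the local block state (clear the
// writable bit or invalidate) and supply data if we hold a dirty copy and
// the snoop needs a response.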
1049 template<class TagStore>
1050 void
1051 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1052 bool is_timing, bool is_deferred,
1053 bool pending_inval)
1054 {
1055 // deferred snoops can only happen in timing mode
1056 assert(!(is_deferred && !is_timing));
1057 // pending_inval only makes sense on deferred snoops
1058 assert(!(pending_inval && !is_deferred));
1059 assert(pkt->isRequest());
1060
1061 if (forwardSnoops) {
1062 // first propagate snoop upward to see if anyone above us wants to
1063 // handle it. save & restore packet src since it will get
1064 // rewritten to be relative to cpu-side bus (if any)
1065 bool alreadyResponded = pkt->memInhibitAsserted();
1066 if (is_timing) {
1067 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1068 snoopPkt->setExpressSnoop();
1069 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1070 cpuSidePort->sendTiming(snoopPkt);
1071 if (snoopPkt->memInhibitAsserted()) {
1072 // cache-to-cache response from some upper cache
1073 assert(!alreadyResponded);
1074 pkt->assertMemInhibit();
1075 } else {
1076 delete snoopPkt->senderState;
1077 }
1078 if (snoopPkt->sharedAsserted()) {
1079 pkt->assertShared();
1080 }
1081 delete snoopPkt;
1082 } else {
1083 int origSrc = pkt->getSrc();
1084 cpuSidePort->sendAtomic(pkt);
1085 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1086 // cache-to-cache response from some upper cache:
1087 // forward response to original requester
1088 assert(pkt->isResponse());
1089 }
1090 pkt->setSrc(origSrc);
1091 }
1092 }
1093
1094 if (!blk || !blk->isValid()) {
1095 return;
1096 }
1097
1098 // we may end up modifying both the block state and the packet (if
1099 // we respond in atomic mode), so just figure out what to do now
1100 // and then do it later
1101 bool respond = blk->isDirty() && pkt->needsResponse();
1102 bool have_exclusive = blk->isWritable();
1103 bool invalidate = pkt->isInvalidate();
1104
1105 if (pkt->isRead() && !pkt->isInvalidate()) {
1106 assert(!pkt->needsExclusive());
1107 pkt->assertShared();
1108 int bits_to_clear = BlkWritable;
1109 const bool haveOwnershipState = true; // for now
1110 if (!haveOwnershipState) {
1111 // if we don't support pure ownership (dirty && !writable),
1112 // have to clear dirty bit here, assume memory snarfs data
1113 // on cache-to-cache xfer
1114 bits_to_clear |= BlkDirty;
1115 }
1116 blk->status &= ~bits_to_clear;
1117 }
1118
1119 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1120 pkt->cmdString(), blockAlign(pkt->getAddr()),
1121 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1122
1123 if (respond) {
1124 assert(!pkt->memInhibitAsserted());
1125 pkt->assertMemInhibit();
1126 if (have_exclusive) {
1127 pkt->setSupplyExclusive();
1128 }
1129 if (is_timing) {
1130 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1131 } else {
1132 pkt->makeAtomicResponse();
1133 pkt->setDataFromBlock(blk->data, blkSize);
1134 }
1135 } else if (is_timing && is_deferred) {
1136 // if it's a deferred timing snoop then we've made a copy of
1137 // the packet, and so if we're not using that copy to respond
1138 // then we need to delete it here.
1139 delete pkt;
1140 }
1141
1142 // Do this last in case it deallocates block data or something
1143 // like that
1144 if (invalidate) {
1145 tags->invalidateBlk(blk);
1146 }
1147 }
1148
1149
1150 template<class TagStore>
1151 void
1152 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1153 {
1154 // Note that some deferred snoops don't have requests, since the
1155 // original access may have already completed
1156 if ((pkt->req && pkt->req->isUncacheable()) ||
1157 pkt->cmd == MemCmd::Writeback) {
1158 //Can't get a hit on an uncacheable address
1159 //Revisit this for multi level coherence
1160 return;
1161 }
1162
1163 BlkType *blk = tags->findBlock(pkt->getAddr());
1164
1165 Addr blk_addr = blockAlign(pkt->getAddr());
1166 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1167
1168 // Let the MSHR itself track the snoop and decide whether we want
1169 // to go ahead and do the regular cache snoop
1170 if (mshr && mshr->handleSnoop(pkt, order++)) {
1171 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1172 blk_addr);
1173 if (mshr->getNumTargets() > numTarget)
1174 warn("allocating bonus target for snoop"); //handle later
1175 return;
1176 }
1177
1178 //We also need to check the writeback buffers and handle those
1179 std::vector<MSHR *> writebacks;
1180 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1181 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1182 pkt->getAddr());
1183
1184 //Look through writebacks for any cacheable writes and use the first one
1185 for (int i = 0; i < writebacks.size(); i++) {
1186 mshr = writebacks[i];
1187 assert(!mshr->isUncacheable());
1188 assert(mshr->getNumTargets() == 1);
1189 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1190 assert(wb_pkt->cmd == MemCmd::Writeback);
1191
1192 assert(!pkt->memInhibitAsserted());
1193 pkt->assertMemInhibit();
1194 if (!pkt->needsExclusive()) {
1195 pkt->assertShared();
1196 } else {
1197 // if we're not asserting the shared line, we need to
1198 // invalidate our copy. we'll do that below as long as
1199 // the packet's invalidate flag is set...
1200 assert(pkt->isInvalidate());
1201 }
1202 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1203 false, false);
1204
1205 if (pkt->isInvalidate()) {
1206 // Invalidation trumps our writeback... discard here
1207 markInService(mshr);
1208 delete wb_pkt;
1209 }
1210
1211 // If this was a shared writeback, there may still be
1212 // other shared copies above that require invalidation.
1213 // We could be more selective and return here if the
1214 // request is non-exclusive or if the writeback is
1215 // exclusive.
1216 break;
1217 }
1218 }
1219
1220 handleSnoop(pkt, blk, true, false, false);
1221 }
1222
1223
1224 template<class TagStore>
1225 Tick
1226 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1227 {
1228 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1229 // Can't get a hit on an uncacheable address
1230 // Revisit this for multi level coherence
1231 return hitLatency;
1232 }
1233
1234 BlkType *blk = tags->findBlock(pkt->getAddr());
1235 handleSnoop(pkt, blk, false, false, false);
1236 return hitLatency;
1237 }
1238
1239
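// Pick the next request to send on the memory side: normally the oldest
// ready MSHR, but a full write buffer or an address conflict between the two
// queues can change the order; if neither queue has anything ready, ask the
// prefetcher for a candidate.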
1240 template<class TagStore>
1241 MSHR *
1242 Cache<TagStore>::getNextMSHR()
1243 {
1244 // Check both MSHR queue and write buffer for potential requests
1245 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1246 MSHR *write_mshr = writeBuffer.getNextMSHR();
1247
1248 // Now figure out which one to send... some cases are easy
1249 if (miss_mshr && !write_mshr) {
1250 return miss_mshr;
1251 }
1252 if (write_mshr && !miss_mshr) {
1253 return write_mshr;
1254 }
1255
1256 if (miss_mshr && write_mshr) {
1257 // We have one of each... normally we favor the miss request
1258 // unless the write buffer is full
1259 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1260 // Write buffer is full, so we'd like to issue a write;
1261 // need to search MSHR queue for conflicting earlier miss.
1262 MSHR *conflict_mshr =
1263 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1264
1265 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1266 // Service misses in order until conflict is cleared.
1267 return conflict_mshr;
1268 }
1269
1270 // No conflicts; issue write
1271 return write_mshr;
1272 }
1273
1274 // Write buffer isn't full, but need to check it for
1275 // conflicting earlier writeback
1276 MSHR *conflict_mshr =
1277 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1278 if (conflict_mshr) {
1279 // not sure why we don't check order here... it was in the
1280 // original code but commented out.
1281
1282 // The only way this happens is if we are
1283 // doing a write and we didn't have permissions
1284 // then subsequently saw a writeback (owned got evicted)
1285 // We need to make sure to perform the writeback first to
1286 // preserve the dirty data; then we can issue the write.
1287
1288 // should we return write_mshr here instead? I.e. do we
1289 // have to flush writes in order? I don't think so... not
1290 // for Alpha anyway. Maybe for x86?
1291 return conflict_mshr;
1292 }
1293
1294 // No conflicts; issue read
1295 return miss_mshr;
1296 }
1297
1298 // fall through... no pending requests. Try a prefetch.
1299 assert(!miss_mshr && !write_mshr);
1300 if (!mshrQueue.isFull()) {
1301 // If we have a miss queue slot, we can try a prefetch
1302 PacketPtr pkt = prefetcher->getPacket();
1303 if (pkt) {
1304 Addr pf_addr = blockAlign(pkt->getAddr());
1305 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
1306 // Update statistic on number of prefetches issued
1307 // (hwpf_mshr_misses)
1308 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1309 // Don't request bus, since we already have it
1310 return allocateMissBuffer(pkt, curTick, false);
1311 }
1312 }
1313 }
1314
1315 return NULL;
1316 }
1317
1318
1319 template<class TagStore>
1320 PacketPtr
1321 Cache<TagStore>::getTimingPacket()
1322 {
1323 MSHR *mshr = getNextMSHR();
1324
1325 if (mshr == NULL) {
1326 return NULL;
1327 }
1328
1329 // use request from 1st target
1330 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1331 PacketPtr pkt = NULL;
1332
1333 if (mshr->isForwardNoResponse()) {
1334 // no response expected, just forward packet as it is
1335 assert(tags->findBlock(mshr->addr) == NULL);
1336 pkt = tgt_pkt;
1337 } else {
1338 BlkType *blk = tags->findBlock(mshr->addr);
1339 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1340
1341 mshr->isForward = (pkt == NULL);
1342
1343 if (mshr->isForward) {
1344 // not a cache block request, but a response is expected
1345 // make copy of current packet to forward, keep current
1346 // copy for response handling
1347 pkt = new Packet(tgt_pkt);
1348 pkt->allocate();
1349 if (pkt->isWrite()) {
1350 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1351 }
1352 }
1353 }
1354
1355 assert(pkt != NULL);
1356 pkt->senderState = mshr;
1357 return pkt;
1358 }
1359
1360
1361 template<class TagStore>
1362 Tick
1363 Cache<TagStore>::nextMSHRReadyTime()
1364 {
1365 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1366 writeBuffer.nextMSHRReadyTime());
1367
1368 if (prefetcher) {
1369 nextReady = std::min(nextReady,
1370 prefetcher->nextPrefetchReadyTime());
1371 }
1372
1373 return nextReady;
1374 }
1375
1376
1377 ///////////////
1378 //
1379 // CpuSidePort
1380 //
1381 ///////////////
1382
1383 template<class TagStore>
1384 void
1385 Cache<TagStore>::CpuSidePort::
1386 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1387 {
1388 // CPU side port doesn't snoop; it's a target only. It can
1389 // potentially respond to any address.
1390 snoop = false;
1391 resp.push_back(myCache()->getAddrRange());
1392 }
1393
1394
1395 template<class TagStore>
1396 bool
1397 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1398 {
1399 // illegal to block responses... can lead to deadlock
1400 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1401 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1402 mustSendRetry = true;
1403 return false;
1404 }
1405
1406 myCache()->timingAccess(pkt);
1407 return true;
1408 }
1409
1410
1411 template<class TagStore>
1412 Tick
1413 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1414 {
1415 return myCache()->atomicAccess(pkt);
1416 }
1417
1418
1419 template<class TagStore>
1420 void
1421 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1422 {
1423 myCache()->functionalAccess(pkt, this, otherPort);
1424 }
1425
1426
1427 template<class TagStore>
1428 Cache<TagStore>::
1429 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1430 const std::string &_label)
1431 : BaseCache::CachePort(_name, _cache, _label)
1432 {
1433 }
1434
1435 ///////////////
1436 //
1437 // MemSidePort
1438 //
1439 ///////////////
1440
1441 template<class TagStore>
1442 void
1443 Cache<TagStore>::MemSidePort::
1444 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1445 {
1446 // Memory-side port always snoops, but never passes requests
1447 // through to targets on the cpu side (so we don't add anything to
1448 // the address range list).
1449 snoop = true;
1450 }
1451
1452
1453 template<class TagStore>
1454 bool
1455 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1456 {
1457 // This needs to be fixed so that the cache updates the MSHR and sends the
1458 // packet back out on the link, but that case probably won't arise, so until
1459 // this gets fixed just panic when it does.
1460 if (pkt->wasNacked())
1461 panic("Need to implement cache resending nacked packets!\n");
1462
1463 if (pkt->isRequest() && blocked) {
1464 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1465 mustSendRetry = true;
1466 return false;
1467 }
1468
1469 if (pkt->isResponse()) {
1470 myCache()->handleResponse(pkt);
1471 } else {
1472 myCache()->snoopTiming(pkt);
1473 }
1474 return true;
1475 }
1476
1477
1478 template<class TagStore>
1479 Tick
1480 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1481 {
1482 // in atomic mode, responses go back to the sender via the
1483 // function return from sendAtomic(), not via a separate
1484 // sendAtomic() from the responder. Thus we should never see a
1485 // response packet in recvAtomic() (anywhere, not just here).
1486 assert(!pkt->isResponse());
1487 return myCache()->snoopAtomic(pkt);
1488 }
1489
1490
1491 template<class TagStore>
1492 void
1493 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1494 {
1495 myCache()->functionalAccess(pkt, this, otherPort);
1496 }
1497
1498
1499
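// Send the next packet on the memory-side port: queued responses take
// precedence, otherwise fetch the next request or writeback via
// getTimingPacket(); track retry state and reschedule the send event if more
// packets will become ready later.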
1500 template<class TagStore>
1501 void
1502 Cache<TagStore>::MemSidePort::sendPacket()
1503 {
1504 // if we have responses that are ready, they take precedence
1505 if (deferredPacketReady()) {
1506 bool success = sendTiming(transmitList.front().pkt);
1507
1508 if (success) {
1509 //send successful, remove packet
1510 transmitList.pop_front();
1511 }
1512
1513 waitingOnRetry = !success;
1514 } else {
1515 // check for non-response packets (requests & writebacks)
1516 PacketPtr pkt = myCache()->getTimingPacket();
1517 if (pkt == NULL) {
1518 // can happen if e.g. we attempt a writeback and fail, but
1519 // before the retry, the writeback is eliminated because
1520 // we snoop another cache's ReadEx.
1521 waitingOnRetry = false;
1522 } else {
1523 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1524
1525 bool success = sendTiming(pkt);
1526
1527 waitingOnRetry = !success;
1528 if (waitingOnRetry) {
1529 DPRINTF(CachePort, "now waiting on a retry\n");
1530 if (!mshr->isForwardNoResponse()) {
1531 delete pkt;
1532 }
1533 } else {
1534 myCache()->markInService(mshr);
1535 }
1536 }
1537 }
1538
1539
1540 // tried to send packet... if it was successful (no retry), see if
1541 // we need to rerequest bus or not
1542 if (!waitingOnRetry) {
1543 Tick nextReady = std::min(deferredPacketReadyTime(),
1544 myCache()->nextMSHRReadyTime());
1545 // @TODO: need to factor in prefetch requests here somehow
1546 if (nextReady != MaxTick) {
1547 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1548 schedule(sendEvent, std::max(nextReady, curTick + 1));
1549 } else {
1550 // no more to send right now: if we're draining, we may be done
1551 if (drainEvent) {
1552 drainEvent->process();
1553 drainEvent = NULL;
1554 }
1555 }
1556 }
1557 }
1558
1559 template<class TagStore>
1560 void
1561 Cache<TagStore>::MemSidePort::recvRetry()
1562 {
1563 assert(waitingOnRetry);
1564 sendPacket();
1565 }
1566
1567
1568 template<class TagStore>
1569 void
1570 Cache<TagStore>::MemSidePort::processSendEvent()
1571 {
1572 assert(!waitingOnRetry);
1573 sendPacket();
1574 }
1575
1576
1577 template<class TagStore>
1578 Cache<TagStore>::
1579 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1580 const std::string &_label)
1581 : BaseCache::CachePort(_name, _cache, _label)
1582 {
1583 // override default send event from SimpleTimingPort
1584 delete sendEvent;
1585 sendEvent = new SendEvent(this);
1586 }