src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 * Dave Greene
30 * Nathan Binkert
31 * Steve Reinhardt
32 * Ron Dreslinski
33 */
34
35 /**
36 * @file
37 * Cache definitions.
38 */
39
40 #include "sim/host.hh"
41 #include "base/misc.hh"
42 #include "base/range_ops.hh"
43
44 #include "mem/cache/cache.hh"
45 #include "mem/cache/cache_blk.hh"
46 #include "mem/cache/miss/mshr.hh"
47 #include "mem/cache/prefetch/base_prefetcher.hh"
48
49 #include "sim/sim_exit.hh" // for SimExitEvent
50
51
52 template<class TagStore>
53 Cache<TagStore>::Cache(const std::string &_name,
54 Cache<TagStore>::Params &params)
55 : BaseCache(_name, params.baseParams),
56 prefetchAccess(params.prefetchAccess),
57 tags(params.tags),
58 prefetcher(params.prefetcher),
59 doFastWrites(params.doFastWrites),
60 prefetchMiss(params.prefetchMiss)
61 {
62 tempBlock = new BlkType();
63 tempBlock->data = new uint8_t[blkSize];
64
65 cpuSidePort = new CpuSidePort(_name + "-cpu_side_port", this,
66 params.baseParams.cpuSideFilterRanges);
67 memSidePort = new MemSidePort(_name + "-mem_side_port", this,
68 params.baseParams.memSideFilterRanges);
69 cpuSidePort->setOtherPort(memSidePort);
70 memSidePort->setOtherPort(cpuSidePort);
71
72 tags->setCache(this);
73 prefetcher->setCache(this);
74 }
75
76 template<class TagStore>
77 void
78 Cache<TagStore>::regStats()
79 {
80 BaseCache::regStats();
81 tags->regStats(name());
82 prefetcher->regStats(name());
83 }
84
85 template<class TagStore>
86 Port *
87 Cache<TagStore>::getPort(const std::string &if_name, int idx)
88 {
89 if (if_name == "" || if_name == "cpu_side") {
90 return cpuSidePort;
91 } else if (if_name == "mem_side") {
92 return memSidePort;
93 } else if (if_name == "functional") {
94 return new CpuSidePort(name() + "-cpu_side_funcport", this,
95 std::vector<Range<Addr> >());
96 } else {
97 panic("Port name %s unrecognized\n", if_name);
98 }
99 }
100
101 template<class TagStore>
102 void
103 Cache<TagStore>::deletePortRefs(Port *p)
104 {
105 if (cpuSidePort == p || memSidePort == p)
106 panic("Can only delete functional ports\n");
107
108 delete p;
109 }
110
111
112 template<class TagStore>
113 void
114 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
115 {
116 uint64_t overwrite_val;
117 bool overwrite_mem;
118 uint64_t condition_val64;
119 uint32_t condition_val32;
120
121 int offset = tags->extractBlkOffset(pkt->getAddr());
122 uint8_t *blk_data = blk->data + offset;
123
124 assert(sizeof(uint64_t) >= pkt->getSize());
125
126 overwrite_mem = true;
127 // keep a copy of our possible write value, and copy what is at the
128 // memory address into the packet
129 pkt->writeData((uint8_t *)&overwrite_val);
130 pkt->setData(blk_data);
131
132 if (pkt->req->isCondSwap()) {
133 if (pkt->getSize() == sizeof(uint64_t)) {
134 condition_val64 = pkt->req->getExtraData();
135 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
136 sizeof(uint64_t));
137 } else if (pkt->getSize() == sizeof(uint32_t)) {
138 condition_val32 = (uint32_t)pkt->req->getExtraData();
139 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
140 sizeof(uint32_t));
141 } else
142 panic("Invalid size for conditional read/write\n");
143 }
144
145 if (overwrite_mem)
146 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
147 }
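// A minimal standalone sketch of the conditional-swap check above, operating
// on a raw buffer (illustrative only, not part of the build; the function
// name and parameters are made up, and it assumes <cstring> for memcmp/memcpy):
#if 0
static bool
condSwap64(uint8_t *mem, uint64_t new_val, uint64_t expected)
{
    // overwrite only if the current contents match the expected value
    if (std::memcmp(&expected, mem, sizeof(uint64_t)) != 0)
        return false;
    std::memcpy(mem, &new_val, sizeof(uint64_t));
    return true;
}
#endif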
148
149
150 template<class TagStore>
151 void
152 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
153 {
154 assert(blk);
155 // Occasionally this is not true... if we are a lower-level cache
156 // satisfying a string of Read and ReadEx requests from
157 // upper-level caches, a Read will mark the block as shared but we
158 // can satisfy a following ReadEx anyway since we can rely on the
159 // Read requester(s) to have buffered the ReadEx snoop and to
160 // invalidate their blocks after receiving them.
161 // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
162 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
163
164 // Check RMW operations first since both isRead() and
165 // isWrite() will be true for them
166 if (pkt->cmd == MemCmd::SwapReq) {
167 cmpAndSwap(blk, pkt);
168 } else if (pkt->isWrite()) {
169 if (blk->checkWrite(pkt)) {
170 blk->status |= BlkDirty;
171 pkt->writeDataToBlock(blk->data, blkSize);
172 }
173 } else if (pkt->isRead()) {
174 if (pkt->isLocked()) {
175 blk->trackLoadLocked(pkt);
176 }
177 pkt->setDataFromBlock(blk->data, blkSize);
178 if (pkt->getSize() == blkSize) {
179 // special handling for coherent block requests from
180 // upper-level caches
181 if (pkt->needsExclusive()) {
182 // on ReadExReq we give up our copy
183 tags->invalidateBlk(blk);
184 } else {
185 // on ReadReq we create shareable copies here and in
186 // the requester
187 pkt->assertShared();
188 blk->status &= ~BlkWritable;
189 }
190 }
191 } else {
192 // Not a read or write... must be an upgrade. It's OK
193 // to just ack those as long as we have an exclusive
194 // copy at this level.
195 assert(pkt->cmd == MemCmd::UpgradeReq);
196 tags->invalidateBlk(blk);
197 }
198 }
199
200
201 /////////////////////////////////////////////////////
202 //
203 // MSHR helper functions
204 //
205 /////////////////////////////////////////////////////
206
207
208 template<class TagStore>
209 void
210 Cache<TagStore>::markInService(MSHR *mshr)
211 {
212 markInServiceInternal(mshr);
213 #if 0
214 if (mshr->originalCmd == MemCmd::HardPFReq) {
215 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
216 name());
217 //Also clear pending if need be
218 if (!prefetcher->havePending())
219 {
220 deassertMemSideBusRequest(Request_PF);
221 }
222 }
223 #endif
224 }
225
226
227 template<class TagStore>
228 void
229 Cache<TagStore>::squash(int threadNum)
230 {
231 bool unblock = false;
232 BlockedCause cause = NUM_BLOCKED_CAUSES;
233
234 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
235 noTargetMSHR = NULL;
236 unblock = true;
237 cause = Blocked_NoTargets;
238 }
239 if (mshrQueue.isFull()) {
240 unblock = true;
241 cause = Blocked_NoMSHRs;
242 }
243 mshrQueue.squash(threadNum);
244 if (unblock && !mshrQueue.isFull()) {
245 clearBlocked(cause);
246 }
247 }
248
249 /////////////////////////////////////////////////////
250 //
251 // Access path: requests coming in from the CPU side
252 //
253 /////////////////////////////////////////////////////
254
255 template<class TagStore>
256 bool
257 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, int &lat)
258 {
259 if (pkt->req->isUncacheable()) {
260 blk = NULL;
261 lat = hitLatency;
262 return false;
263 }
264
265 bool satisfied = false; // assume the worst
266 blk = tags->findBlock(pkt->getAddr(), lat);
267
268 if (prefetchAccess) {
269 // We are determining prefetches on the access stream, so call the prefetcher
270 prefetcher->handleMiss(pkt, curTick);
271 }
272
273 DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), pkt->getAddr(),
274 (blk) ? "hit" : "miss");
275
276 if (blk != NULL) {
277 // HIT
278 if (blk->isPrefetch()) {
279 // Signal that this was a hit on a prefetched block; the prefetch
280 // has proven useful, so clear the flag (we only get here if it was set)
281 DPRINTF(HWPrefetch, "Hit a block that was prefetched\n");
282 blk->status &= ~BlkHWPrefetched;
283 if (prefetchMiss) {
284 // If we are using the miss stream, signal the prefetcher;
285 // otherwise the access stream would have already
286 // signaled this hit
287 prefetcher->handleMiss(pkt, curTick);
288 }
289 }
290
291 if (pkt->needsExclusive() ? blk->isWritable() : blk->isValid()) {
292 // OK to satisfy access
293 hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
294 satisfied = true;
295 satisfyCpuSideRequest(pkt, blk);
296 } else if (pkt->cmd == MemCmd::Writeback) {
297 // special case: writeback to read-only block (e.g., from
298 // L1 into L2). since we're really just passing ownership
299 // from one cache to another, we can update this cache to
300 // be the owner without making the block writeable
301 assert(!blk->isWritable() /* && !blk->isDirty() */);
302 assert(blkSize == pkt->getSize());
303 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
304 blk->status |= BlkDirty;
305 satisfied = true;
306 // nothing else to do; writeback doesn't expect response
307 assert(!pkt->needsResponse());
308 } else {
309 // permission violation... nothing to do here, leave unsatisfied
310 // for statistics purposes this counts like a complete miss
311 incMissCount(pkt);
312 }
313 } else {
314 // complete miss (no matching block)
315 incMissCount(pkt);
316
317 if (pkt->isLocked() && pkt->isWrite()) {
318 // miss on store conditional... just give up now
319 pkt->req->setExtraData(0);
320 satisfied = true;
321 }
322 }
323
324 return satisfied;
325 }
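// In short, access() reports "satisfied" for hits with sufficient permissions,
// for writebacks absorbed into an existing block, and for store-conditionals
// that fail fast on a miss; everything else (including permission-violation
// hits, which count as misses for the stats) returns false and is handled by
// the caller via an MSHR or write-buffer allocation.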
326
327
328 class ForwardResponseRecord : public Packet::SenderState
329 {
330 Packet::SenderState *prevSenderState;
331 int prevSrc;
332 #ifndef NDEBUG
333 BaseCache *cache;
334 #endif
335 public:
336 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
337 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
338 #ifndef NDEBUG
339 , cache(_cache)
340 #endif
341 {}
342 void restore(Packet *pkt, BaseCache *_cache)
343 {
344 assert(_cache == cache);
345 pkt->senderState = prevSenderState;
346 pkt->setDest(prevSrc);
347 }
348 };
349
350
351 template<class TagStore>
352 bool
353 Cache<TagStore>::timingAccess(PacketPtr pkt)
354 {
355 //@todo Add back in MemDebug Calls
356 // MemDebug::cacheAccess(pkt);
357
358 // we charge hitLatency for doing just about anything here
359 Tick time = curTick + hitLatency;
360
361 if (pkt->isResponse()) {
362 // must be cache-to-cache response from upper to lower level
363 ForwardResponseRecord *rec =
364 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
365 assert(rec != NULL);
366 rec->restore(pkt, this);
367 delete rec;
368 memSidePort->respond(pkt, time);
369 return true;
370 }
371
372 assert(pkt->isRequest());
373
374 if (pkt->memInhibitAsserted()) {
375 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
376 pkt->getAddr());
377 assert(!pkt->req->isUncacheable());
378 // Special tweak for multilevel coherence: snoop downward here
379 // on invalidates since there may be other caches below here
380 // that have shared copies. Not necessary if we know that
381 // supplier had exclusive copy to begin with.
382 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
383 Packet *snoopPkt = new Packet(pkt, true); // clear flags
384 snoopPkt->setExpressSnoop();
385 snoopPkt->assertMemInhibit();
386 memSidePort->sendTiming(snoopPkt);
387 // main memory will delete snoopPkt
388 }
389 return true;
390 }
391
392 if (pkt->req->isUncacheable()) {
393 // writes go in write buffer, reads use MSHR
394 if (pkt->isWrite() && !pkt->isRead()) {
395 allocateWriteBuffer(pkt, time, true);
396 } else {
397 allocateUncachedReadBuffer(pkt, time, true);
398 }
399 assert(pkt->needsResponse()); // else we should delete it here??
400 return true;
401 }
402
403 int lat = hitLatency;
404 bool satisfied = false;
405
406 Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
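// (e.g. with blkSize == 64, an access to 0x10234 falls in blk_addr 0x10200)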
407 MSHR *mshr = mshrQueue.findMatch(blk_addr);
408
409 if (!mshr) {
410 // no outstanding access to this block, look up in cache
411 // (otherwise if we allow reads while there's an outstanding
412 // write miss, the read could return stale data out of the
413 // cache block... a more aggressive system could detect the
414 // overlap (if any) and forward data out of the MSHRs, but we
415 // don't do that yet)
416 BlkType *blk = NULL;
417 satisfied = access(pkt, blk, lat);
418 }
419
420 #if 0
421 PacketList writebacks;
422
423 // If this is a block size write/hint (WH64) allocate the block here
424 // if the coherence protocol allows it.
425 /** @todo make the fast write alloc (wh64) work with coherence. */
426 /** @todo Do we want to do fast writes for writebacks as well? */
427 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
428 (pkt->cmd == MemCmd::WriteReq
429 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
430 // no outstanding misses, so we can do this
431 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
432 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
433 if (outstanding_miss) {
434 warn("WriteInv doing a fast allocate "
435 "with an outstanding miss to the same address\n");
436 }
437 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
438 writebacks);
439 ++fastWrites;
440 }
441 }
442
443 // copy writebacks to write buffer
444 while (!writebacks.empty()) {
445 PacketPtr wbPkt = writebacks.front();
446 allocateWriteBuffer(wbPkt, time, true);
447 writebacks.pop_front();
448 }
449 #endif
450
451 bool needsResponse = pkt->needsResponse();
452
453 if (satisfied) {
454 if (needsResponse) {
455 pkt->makeTimingResponse();
456 cpuSidePort->respond(pkt, curTick+lat);
457 } else {
458 delete pkt;
459 }
460 } else {
461 // miss
462 if (prefetchMiss)
463 prefetcher->handleMiss(pkt, time);
464
465 if (mshr) {
466 // MSHR hit
467 //@todo remove hw_pf here
468 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
469 if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
470 mshr->threadNum = -1;
471 }
472 mshr->allocateTarget(pkt, time, order++);
473 if (mshr->getNumTargets() == numTarget) {
474 noTargetMSHR = mshr;
475 setBlocked(Blocked_NoTargets);
476 // need to be careful with this... if this mshr isn't
477 // ready yet (i.e. time > curTick), we don't want to
478 // move it ahead of mshrs that are ready
479 // mshrQueue.moveToFront(mshr);
480 }
481 } else {
482 // no MSHR
483 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
484 // always mark as cache fill for now... if we implement
485 // no-write-allocate or bypass accesses this will have to
486 // be changed.
487 if (pkt->cmd == MemCmd::Writeback) {
488 allocateWriteBuffer(pkt, time, true);
489 } else {
490 allocateMissBuffer(pkt, time, true);
491 }
492 }
493 }
494
495 return true;
496 }
497
498
499 template<class TagStore>
500 PacketPtr
501 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
502 bool needsExclusive)
503 {
504 bool blkValid = blk && blk->isValid();
505
506 if (cpu_pkt->req->isUncacheable()) {
507 assert(blk == NULL);
508 return NULL;
509 }
510
511 if (!blkValid &&
512 (cpu_pkt->cmd == MemCmd::Writeback ||
513 cpu_pkt->cmd == MemCmd::UpgradeReq)) {
514 // For now, writebacks from upper-level caches that
515 // completely miss in the cache just go through. If we had
516 // "fast write" support (where we could write the whole
517 // block w/o fetching new data) we might want to allocate
518 // on writeback misses instead.
519 return NULL;
520 }
521
522 assert(cpu_pkt->needsResponse());
523
524 MemCmd cmd;
525 // @TODO make useUpgrades a parameter.
526 // Note that ownership protocols require upgrade, otherwise a
527 // write miss on a shared owned block will generate a ReadExcl,
528 // which will clobber the owned copy.
529 const bool useUpgrades = true;
530 if (blkValid && useUpgrades) {
531 // only reason to be here is that blk is shared
532 // (read-only) and we need exclusive
533 assert(needsExclusive && !blk->isWritable());
534 cmd = MemCmd::UpgradeReq;
535 } else {
536 // block is invalid
537 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
538 }
539 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
540
541 pkt->allocate();
542 return pkt;
543 }
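// To summarize the command selection above for the cacheable case:
//   valid (shared) block + need exclusive  -> UpgradeReq (data already here)
//   no valid block       + need exclusive  -> ReadExReq
//   no valid block       + read only       -> ReadReq
// Uncacheable requests, and writebacks/upgrades that miss entirely, are
// signalled with a NULL return and simply forwarded by the caller.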
544
545
546 template<class TagStore>
547 Tick
548 Cache<TagStore>::atomicAccess(PacketPtr pkt)
549 {
550 int lat = hitLatency;
551
552 // @TODO: make this a parameter
553 bool last_level_cache = false;
554
555 if (pkt->memInhibitAsserted()) {
556 assert(!pkt->req->isUncacheable());
557 // have to invalidate ourselves and any lower caches even if
558 // upper cache will be responding
559 if (pkt->isInvalidate()) {
560 BlkType *blk = tags->findBlock(pkt->getAddr());
561 if (blk && blk->isValid()) {
562 tags->invalidateBlk(blk);
563 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
564 pkt->cmdString(), pkt->getAddr());
565 }
566 if (!last_level_cache) {
567 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
568 pkt->cmdString(), pkt->getAddr());
569 lat += memSidePort->sendAtomic(pkt);
570 }
571 } else {
572 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
573 pkt->cmdString(), pkt->getAddr());
574 }
575
576 return lat;
577 }
578
579 // should assert here that there are no outstanding MSHRs or
580 // writebacks... that would mean that someone used an atomic
581 // access in timing mode
582
583 BlkType *blk = NULL;
584
585 if (!access(pkt, blk, lat)) {
586 // MISS
587 PacketPtr busPkt = getBusPacket(pkt, blk, pkt->needsExclusive());
588
589 bool isCacheFill = (busPkt != NULL);
590
591 if (busPkt == NULL) {
592 // just forwarding the same request to the next level
593 // no local cache operation involved
594 busPkt = pkt;
595 }
596
597 DPRINTF(Cache, "Sending an atomic %s for %x\n",
598 busPkt->cmdString(), busPkt->getAddr());
599
600 #if TRACING_ON
601 CacheBlk::State old_state = blk ? blk->status : 0;
602 #endif
603
604 lat += memSidePort->sendAtomic(busPkt);
605
606 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
607 busPkt->cmdString(), busPkt->getAddr(), old_state);
608
609 if (isCacheFill) {
610 PacketList writebacks;
611 blk = handleFill(busPkt, blk, writebacks);
612 satisfyCpuSideRequest(pkt, blk);
613 delete busPkt;
614
615 // Handle writebacks if needed
616 while (!writebacks.empty()){
617 PacketPtr wbPkt = writebacks.front();
618 memSidePort->sendAtomic(wbPkt);
619 writebacks.pop_front();
620 delete wbPkt;
621 }
622 }
623 }
624
625 // We now have the block one way or another (hit or completed miss)
626
627 if (pkt->needsResponse()) {
628 pkt->makeAtomicResponse();
629 }
630
631 return lat;
632 }
633
634
635 template<class TagStore>
636 void
637 Cache<TagStore>::functionalAccess(PacketPtr pkt,
638 CachePort *otherSidePort)
639 {
640 Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
641 BlkType *blk = tags->findBlock(pkt->getAddr());
642
643 if (blk && pkt->checkFunctional(blk_addr, blkSize, blk->data)) {
644 // request satisfied from block
645 return;
646 }
647
648 // Need to check for outstanding misses and writes; if neither one
649 // satisfies, then forward to other side of cache.
650 if (!(mshrQueue.checkFunctional(pkt, blk_addr) ||
651 writeBuffer.checkFunctional(pkt, blk_addr))) {
652 otherSidePort->checkAndSendFunctional(pkt);
653 }
654 }
655
656
657 /////////////////////////////////////////////////////
658 //
659 // Response handling: responses from the memory side
660 //
661 /////////////////////////////////////////////////////
662
663
664 template<class TagStore>
665 void
666 Cache<TagStore>::handleResponse(PacketPtr pkt)
667 {
668 Tick time = curTick + hitLatency;
669 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
670 assert(mshr);
671
672 if (pkt->wasNacked()) {
673 //pkt->reinitFromRequest();
674 warn("NACKs from devices not connected to the same bus "
675 "not implemented\n");
676 return;
677 }
678 assert(!pkt->isError());
679 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
680
681 MSHRQueue *mq = mshr->queue;
682 bool wasFull = mq->isFull();
683
684 if (mshr == noTargetMSHR) {
685 // we always clear at least one target
686 clearBlocked(Blocked_NoTargets);
687 noTargetMSHR = NULL;
688 }
689
690 // Initial target is used just for stats
691 MSHR::Target *initial_tgt = mshr->getTarget();
692 BlkType *blk = tags->findBlock(pkt->getAddr());
693 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
694 Tick miss_latency = curTick - initial_tgt->recvTime;
695 PacketList writebacks;
696
697 if (pkt->req->isUncacheable()) {
698 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
699 miss_latency;
700 } else {
701 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
702 miss_latency;
703 }
704
705 if (mshr->isCacheFill) {
706 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
707 pkt->getAddr());
708
709 // give mshr a chance to do some dirty work
710 mshr->handleFill(pkt, blk);
711
712 blk = handleFill(pkt, blk, writebacks);
713 assert(blk != NULL);
714 }
715
716 // First offset for critical word first calculations
717 int initial_offset = 0;
718
719 if (mshr->hasTargets()) {
720 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
721 }
722
723 while (mshr->hasTargets()) {
724 MSHR::Target *target = mshr->getTarget();
725
726 if (target->isCpuSide()) {
727 Tick completion_time;
728 if (blk != NULL) {
729 satisfyCpuSideRequest(target->pkt, blk);
730 // How many bytes past the first request is this one
731 int transfer_offset =
732 target->pkt->getOffset(blkSize) - initial_offset;
733 if (transfer_offset < 0) {
734 transfer_offset += blkSize;
735 }
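// (e.g. initial_offset 48, this target at offset 16, blkSize 64:
//  16 - 48 = -32, wrapped to 32 bytes after the critical word)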
736
737 // If critical word (no offset) return first word time
738 completion_time = tags->getHitLatency() +
739 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
740
741 assert(!target->pkt->req->isUncacheable());
742 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
743 completion_time - target->recvTime;
744 } else {
745 // not a cache fill, just forwarding response
746 completion_time = tags->getHitLatency() + pkt->finishTime;
747 if (pkt->isRead()) {
748 target->pkt->setData(pkt->getPtr<uint8_t>());
749 }
750 }
751 target->pkt->makeTimingResponse();
752 cpuSidePort->respond(target->pkt, completion_time);
753 } else {
754 // response to snoop request
755 DPRINTF(Cache, "processing deferred snoop...\n");
756 handleSnoop(target->pkt, blk, true, true);
757 }
758
759 mshr->popTarget();
760 }
761
762 if (mshr->promoteDeferredTargets()) {
763 MSHRQueue *mq = mshr->queue;
764 mq->markPending(mshr);
765 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
766 } else {
767 mq->deallocate(mshr);
768 if (wasFull && !mq->isFull()) {
769 clearBlocked((BlockedCause)mq->index);
770 }
771 }
772
773 // copy writebacks to write buffer
774 while (!writebacks.empty()) {
775 PacketPtr wbPkt = writebacks.front();
776 allocateWriteBuffer(wbPkt, time, true);
777 writebacks.pop_front();
778 }
779 // if we used temp block, clear it out
780 if (blk == tempBlock) {
781 if (blk->isDirty()) {
782 allocateWriteBuffer(writebackBlk(blk), time, true);
783 }
784 tags->invalidateBlk(blk);
785 }
786
787 delete pkt;
788 }
789
790
791
792
793 template<class TagStore>
794 PacketPtr
795 Cache<TagStore>::writebackBlk(BlkType *blk)
796 {
797 assert(blk && blk->isValid() && blk->isDirty());
798
799 writebacks[0/*pkt->req->getThreadNum()*/]++;
800
801 Request *writebackReq =
802 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
803 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
804 writeback->allocate();
805 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
806
807 blk->status &= ~BlkDirty;
808 return writeback;
809 }
810
811
812 // Note that the reason we return a list of writebacks rather than
813 // inserting them directly in the write buffer is that this function
814 // is called by both atomic and timing-mode accesses, and in atomic
815 // mode we don't mess with the write buffer (we just perform the
816 // writebacks atomically once the original request is complete).
817 template<class TagStore>
818 typename Cache<TagStore>::BlkType*
819 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
820 PacketList &writebacks)
821 {
822 Addr addr = pkt->getAddr();
823 #if TRACING_ON
824 CacheBlk::State old_state = blk ? blk->status : 0;
825 #endif
826
827 if (blk == NULL) {
828 // better have read new data...
829 assert(pkt->isRead());
830
831 // need to do a replacement
832 blk = tags->findReplacement(addr, writebacks);
833 if (blk->isValid()) {
834 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
835 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
836 if (repl_mshr) {
837 // must be an outstanding upgrade request on block
838 // we're about to replace...
839 assert(!blk->isWritable());
840 assert(repl_mshr->needsExclusive());
841 // too hard to replace block with transient state;
842 // just use temporary storage to complete the current
843 // request and then get rid of it
844 assert(!tempBlock->isValid());
845 blk = tempBlock;
846 tempBlock->set = tags->extractSet(addr);
847 DPRINTF(Cache, "using temp block for %x\n", addr);
848 } else {
849 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
850 repl_addr, addr,
851 blk->isDirty() ? "writeback" : "clean");
852
853 if (blk->isDirty()) {
854 // Save writeback packet for handling by caller
855 writebacks.push_back(writebackBlk(blk));
856 }
857 }
858 }
859
860 blk->tag = tags->extractTag(addr);
861 } else {
862 // existing block... probably an upgrade
863 assert(blk->tag == tags->extractTag(addr));
864 // either we're getting new data or the block should already be valid
865 assert(pkt->isRead() || blk->isValid());
866 }
867
868 if (pkt->needsExclusive() || !pkt->sharedAsserted()) {
869 blk->status = BlkValid | BlkWritable;
870 } else {
871 blk->status = BlkValid;
872 }
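// i.e. we take the block writable unless this was a read-only fill on
// which some other cache asserted the shared line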
873
874 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
875 addr, old_state, blk->status);
876
877 // if we got new data, copy it in
878 if (pkt->isRead()) {
879 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
880 }
881
882 blk->whenReady = pkt->finishTime;
883
884 return blk;
885 }
886
887
888 /////////////////////////////////////////////////////
889 //
890 // Snoop path: requests coming in from the memory side
891 //
892 /////////////////////////////////////////////////////
893
894 template<class TagStore>
895 void
896 Cache<TagStore>::doTimingSupplyResponse(PacketPtr req_pkt,
897 uint8_t *blk_data,
898 bool already_copied)
899 {
900 // timing-mode snoop responses require a new packet, unless we
901 // already made a copy...
902 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt, true);
903 if (!req_pkt->isInvalidate()) {
904 // note that we're ignoring the shared flag on req_pkt... it's
905 // basically irrelevant, as we'll always assert shared unless
906 // it's an exclusive request, in which case the shared line
907 // should never be asserted
908 pkt->assertShared();
909 }
910 pkt->allocate();
911 pkt->makeTimingResponse();
912 if (pkt->isRead()) {
913 pkt->setDataFromBlock(blk_data, blkSize);
914 }
915 memSidePort->respond(pkt, curTick + hitLatency);
916 }
917
918 template<class TagStore>
919 void
920 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
921 bool is_timing, bool is_deferred)
922 {
923 assert(pkt->isRequest());
924
925 // first propagate snoop upward to see if anyone above us wants to
926 // handle it. save & restore packet src since it will get
927 // rewritten to be relative to cpu-side bus (if any)
928 bool alreadyResponded = pkt->memInhibitAsserted();
929 if (is_timing) {
930 Packet *snoopPkt = new Packet(pkt, true); // clear flags
931 snoopPkt->setExpressSnoop();
932 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
933 cpuSidePort->sendTiming(snoopPkt);
934 if (snoopPkt->memInhibitAsserted()) {
935 // cache-to-cache response from some upper cache
936 assert(!alreadyResponded);
937 pkt->assertMemInhibit();
938 } else {
939 delete snoopPkt->senderState;
940 }
941 if (snoopPkt->sharedAsserted()) {
942 pkt->assertShared();
943 }
944 delete snoopPkt;
945 } else {
946 int origSrc = pkt->getSrc();
947 cpuSidePort->sendAtomic(pkt);
948 if (!alreadyResponded && pkt->memInhibitAsserted()) {
949 // cache-to-cache response from some upper cache:
950 // forward response to original requester
951 assert(pkt->isResponse());
952 }
953 pkt->setSrc(origSrc);
954 }
955
956 if (!blk || !blk->isValid()) {
957 return;
958 }
959
960 // we may end up modifying both the block state and the packet (if
961 // we respond in atomic mode), so just figure out what to do now
962 // and then do it later
963 bool respond = blk->isDirty() && pkt->needsResponse();
964 bool have_exclusive = blk->isWritable();
965 bool invalidate = pkt->isInvalidate();
966
967 if (pkt->isRead() && !pkt->isInvalidate()) {
968 assert(!pkt->needsExclusive());
969 pkt->assertShared();
970 int bits_to_clear = BlkWritable;
971 const bool haveOwnershipState = true; // for now
972 if (!haveOwnershipState) {
973 // if we don't support pure ownership (dirty && !writable),
974 // have to clear dirty bit here, assume memory snarfs data
975 // on cache-to-cache xfer
976 bits_to_clear |= BlkDirty;
977 }
978 blk->status &= ~bits_to_clear;
979 }
980
981 if (respond) {
982 assert(!pkt->memInhibitAsserted());
983 pkt->assertMemInhibit();
984 if (have_exclusive) {
985 pkt->setSupplyExclusive();
986 }
987 if (is_timing) {
988 doTimingSupplyResponse(pkt, blk->data, is_deferred);
989 } else {
990 pkt->makeAtomicResponse();
991 pkt->setDataFromBlock(blk->data, blkSize);
992 }
993 }
994
995 // Do this last in case it deallocates block data or something
996 // like that
997 if (invalidate) {
998 tags->invalidateBlk(blk);
999 }
1000
1001 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1002 pkt->cmdString(), blockAlign(pkt->getAddr()),
1003 respond ? "responding, " : "", blk->status);
1004 }
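// Net effect of a snoop on a valid block: a non-invalidating read leaves
// us with a shared (read-only) copy, an invalidating request removes the
// block, and if we held dirty data we assert MemInhibit and supply it so
// the requester gets the data from us rather than from memory.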
1005
1006
1007 template<class TagStore>
1008 void
1009 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1010 {
1011 // Note that some deferred snoops don't have requests, since the
1012 // original access may have already completed
1013 if ((pkt->req && pkt->req->isUncacheable()) ||
1014 pkt->cmd == MemCmd::Writeback) {
1015 //Can't get a hit on an uncacheable address
1016 //Revisit this for multi level coherence
1017 return;
1018 }
1019
1020 BlkType *blk = tags->findBlock(pkt->getAddr());
1021
1022 Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
1023 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1024
1025 // Let the MSHR itself track the snoop and decide whether we want
1026 // to go ahead and do the regular cache snoop
1027 if (mshr && mshr->handleSnoop(pkt, order++)) {
1028 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1029 blk_addr);
1030 if (mshr->getNumTargets() > numTarget)
1031 warn("allocating bonus target for snoop"); //handle later
1032 return;
1033 }
1034
1035 //We also need to check the writeback buffers and handle those
1036 std::vector<MSHR *> writebacks;
1037 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1038 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1039 pkt->getAddr());
1040
1041 // Look through the writebacks for any cacheable write; use the first one found
1042 for (int i=0; i<writebacks.size(); i++) {
1043 mshr = writebacks[i];
1044 assert(!mshr->isUncacheable());
1045 assert(mshr->getNumTargets() == 1);
1046 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1047 assert(wb_pkt->cmd == MemCmd::Writeback);
1048
1049 assert(!pkt->memInhibitAsserted());
1050 pkt->assertMemInhibit();
1051 if (!pkt->needsExclusive()) {
1052 pkt->assertShared();
1053 } else {
1054 // if we're not asserting the shared line, we need to
1055 // invalidate our copy. we'll do that below as long as
1056 // the packet's invalidate flag is set...
1057 assert(pkt->isInvalidate());
1058 }
1059 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(), false);
1060
1061 if (pkt->isInvalidate()) {
1062 // Invalidation trumps our writeback... discard here
1063 markInService(mshr);
1064 }
1065
1066 // If this was a shared writeback, there may still be
1067 // other shared copies above that require invalidation.
1068 // We could be more selective and return here if the
1069 // request is non-exclusive or if the writeback is
1070 // exclusive.
1071 break;
1072 }
1073 }
1074
1075 handleSnoop(pkt, blk, true, false);
1076 }
1077
1078
1079 template<class TagStore>
1080 Tick
1081 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1082 {
1083 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1084 // Can't get a hit on an uncacheable address
1085 // Revisit this for multi level coherence
1086 return hitLatency;
1087 }
1088
1089 BlkType *blk = tags->findBlock(pkt->getAddr());
1090 handleSnoop(pkt, blk, false, false);
1091 return hitLatency;
1092 }
1093
1094
1095 template<class TagStore>
1096 MSHR *
1097 Cache<TagStore>::getNextMSHR()
1098 {
1099 // Check both MSHR queue and write buffer for potential requests
1100 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1101 MSHR *write_mshr = writeBuffer.getNextMSHR();
1102
1103 // Now figure out which one to send... some cases are easy
1104 if (miss_mshr && !write_mshr) {
1105 return miss_mshr;
1106 }
1107 if (write_mshr && !miss_mshr) {
1108 return write_mshr;
1109 }
1110
1111 if (miss_mshr && write_mshr) {
1112 // We have one of each... normally we favor the miss request
1113 // unless the write buffer is full
1114 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1115 // Write buffer is full, so we'd like to issue a write;
1116 // need to search MSHR queue for conflicting earlier miss.
1117 MSHR *conflict_mshr =
1118 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1119
1120 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1121 // Service misses in order until conflict is cleared.
1122 return conflict_mshr;
1123 }
1124
1125 // No conflicts; issue write
1126 return write_mshr;
1127 }
1128
1129 // Write buffer isn't full, but need to check it for
1130 // conflicting earlier writeback
1131 MSHR *conflict_mshr =
1132 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1133 if (conflict_mshr) {
1134 // not sure why we don't check order here... it was in the
1135 // original code but commented out.
1136
1137 // The only way this happens is if we are
1138 // doing a write, didn't have permissions, and
1139 // then subsequently saw a writeback (the owned copy got evicted).
1140 // We need to perform the writeback first to preserve
1141 // the dirty data; then we can issue the write.
1142
1143 // should we return write_mshr here instead? I.e. do we
1144 // have to flush writes in order? I don't think so... not
1145 // for Alpha anyway. Maybe for x86?
1146 return conflict_mshr;
1147 }
1148
1149 // No conflicts; issue read
1150 return miss_mshr;
1151 }
1152
1153 // fall through... no pending requests. Try a prefetch.
1154 assert(!miss_mshr && !write_mshr);
1155 if (!mshrQueue.isFull()) {
1156 // If we have a miss queue slot, we can try a prefetch
1157 PacketPtr pkt = prefetcher->getPacket();
1158 if (pkt) {
1159 // Update statistic on number of prefetches issued
1160 // (hwpf_mshr_misses)
1161 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
1162 // Don't request bus, since we already have it
1163 return allocateMissBuffer(pkt, curTick, false);
1164 }
1165 }
1166
1167 return NULL;
1168 }
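// In short: demand misses are favored over buffered writes unless the
// write buffer is full with nothing in service; in either case an older
// conflicting entry on the other queue is serviced first, and hardware
// prefetches are only attempted once both queues are empty.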
1169
1170
1171 template<class TagStore>
1172 PacketPtr
1173 Cache<TagStore>::getTimingPacket()
1174 {
1175 MSHR *mshr = getNextMSHR();
1176
1177 if (mshr == NULL) {
1178 return NULL;
1179 }
1180
1181 // use request from 1st target
1182 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1183 PacketPtr pkt = NULL;
1184
1185 if (mshr->isSimpleForward()) {
1186 // no response expected, just forward packet as it is
1187 assert(tags->findBlock(mshr->addr) == NULL);
1188 pkt = tgt_pkt;
1189 } else {
1190 BlkType *blk = tags->findBlock(mshr->addr);
1191 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1192
1193 mshr->isCacheFill = (pkt != NULL);
1194
1195 if (pkt == NULL) {
1196 // not a cache block request, but a response is expected
1197 assert(!mshr->isSimpleForward());
1198 // make copy of current packet to forward, keep current
1199 // copy for response handling
1200 pkt = new Packet(tgt_pkt);
1201 pkt->allocate();
1202 if (pkt->isWrite()) {
1203 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1204 }
1205 }
1206 }
1207
1208 assert(pkt != NULL);
1209 pkt->senderState = mshr;
1210 return pkt;
1211 }
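// getTimingPacket() thus yields one of three things: the target packet
// itself (simple forward, no response expected), a freshly built cache-fill
// request from getBusPacket(), or a copy of the target for a forwarded
// request that still expects a response; in every case the MSHR is attached
// as senderState so the response can be matched back to it.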
1212
1213
1214 ///////////////
1215 //
1216 // CpuSidePort
1217 //
1218 ///////////////
1219
1220 template<class TagStore>
1221 void
1222 Cache<TagStore>::CpuSidePort::
1223 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1224 {
1225 // CPU side port doesn't snoop; it's a target only.
1226 bool dummy;
1227 otherPort->getPeerAddressRanges(resp, dummy);
1228 FilterRangeList(filterRanges, resp);
1229 snoop = false;
1230 }
1231
1232
1233 template<class TagStore>
1234 bool
1235 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1236 {
1237 // illegal to block responses... can lead to deadlock
1238 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1239 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1240 mustSendRetry = true;
1241 return false;
1242 }
1243
1244 myCache()->timingAccess(pkt);
1245 return true;
1246 }
1247
1248
1249 template<class TagStore>
1250 Tick
1251 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1252 {
1253 return myCache()->atomicAccess(pkt);
1254 }
1255
1256
1257 template<class TagStore>
1258 void
1259 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1260 {
1261 if (!checkFunctional(pkt)) {
1262 myCache()->functionalAccess(pkt, cache->memSidePort);
1263 }
1264 }
1265
1266
1267 template<class TagStore>
1268 Cache<TagStore>::
1269 CpuSidePort::CpuSidePort(const std::string &_name,
1270 Cache<TagStore> *_cache, std::vector<Range<Addr> >
1271 filterRanges)
1272 : BaseCache::CachePort(_name, _cache, filterRanges)
1273 {
1274 }
1275
1276 ///////////////
1277 //
1278 // MemSidePort
1279 //
1280 ///////////////
1281
1282 template<class TagStore>
1283 void
1284 Cache<TagStore>::MemSidePort::
1285 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1286 {
1287 otherPort->getPeerAddressRanges(resp, snoop);
1288 FilterRangeList(filterRanges, resp);
1289
1290 // Memory-side port always snoops, so unconditionally set flag for
1291 // caller.
1292 snoop = true;
1293 }
1294
1295
1296 template<class TagStore>
1297 bool
1298 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1299 {
1300 // this needs to be fixed so that the cache updates the mshr and sends the
1301 // packet back out on the link, but that case probably won't occur, so until
1302 // this gets fixed, just panic when it does
1303 if (pkt->wasNacked())
1304 panic("Need to implement cache resending nacked packets!\n");
1305
1306 if (pkt->isRequest() && blocked) {
1307 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1308 mustSendRetry = true;
1309 return false;
1310 }
1311
1312 if (pkt->isResponse()) {
1313 myCache()->handleResponse(pkt);
1314 } else {
1315 myCache()->snoopTiming(pkt);
1316 }
1317 return true;
1318 }
1319
1320
1321 template<class TagStore>
1322 Tick
1323 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1324 {
1325 // in atomic mode, responses go back to the sender via the
1326 // function return from sendAtomic(), not via a separate
1327 // sendAtomic() from the responder. Thus we should never see a
1328 // response packet in recvAtomic() (anywhere, not just here).
1329 assert(!pkt->isResponse());
1330 return myCache()->snoopAtomic(pkt);
1331 }
1332
1333
1334 template<class TagStore>
1335 void
1336 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1337 {
1338 if (!checkFunctional(pkt)) {
1339 myCache()->functionalAccess(pkt, cache->cpuSidePort);
1340 }
1341 }
1342
1343
1344
1345 template<class TagStore>
1346 void
1347 Cache<TagStore>::MemSidePort::sendPacket()
1348 {
1349 // if we have responses that are ready, they take precedence
1350 if (deferredPacketReady()) {
1351 bool success = sendTiming(transmitList.front().pkt);
1352
1353 if (success) {
1354 //send successful, remove packet
1355 transmitList.pop_front();
1356 }
1357
1358 waitingOnRetry = !success;
1359 } else {
1360 // check for non-response packets (requests & writebacks)
1361 PacketPtr pkt = myCache()->getTimingPacket();
1362 if (pkt == NULL) {
1363 // can happen if e.g. we attempt a writeback and fail, but
1364 // before the retry, the writeback is eliminated because
1365 // we snoop another cache's ReadEx.
1366 waitingOnRetry = false;
1367 } else {
1368 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1369
1370 bool success = sendTiming(pkt);
1371 DPRINTF(CachePort,
1372 "Address %x was %s in sending the timing request\n",
1373 pkt->getAddr(), success ? "successful" : "unsuccessful");
1374
1375 waitingOnRetry = !success;
1376 if (waitingOnRetry) {
1377 DPRINTF(CachePort, "now waiting on a retry\n");
1378 if (!mshr->isSimpleForward()) {
1379 delete pkt;
1380 }
1381 } else {
1382 myCache()->markInService(mshr);
1383 }
1384 }
1385 }
1386
1387
1388 // tried to send packet... if it was successful (no retry), see if
1389 // we need to rerequest bus or not
1390 if (!waitingOnRetry) {
1391 Tick nextReady = std::min(deferredPacketReadyTime(),
1392 myCache()->nextMSHRReadyTime());
1393 // @TODO: need to factor in prefetch requests here somehow
1394 if (nextReady != MaxTick) {
1395 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1396 sendEvent->schedule(std::max(nextReady, curTick + 1));
1397 } else {
1398 // no more to send right now: if we're draining, we may be done
1399 if (drainEvent) {
1400 drainEvent->process();
1401 drainEvent = NULL;
1402 }
1403 }
1404 }
1405 }
1406
1407 template<class TagStore>
1408 void
1409 Cache<TagStore>::MemSidePort::recvRetry()
1410 {
1411 assert(waitingOnRetry);
1412 sendPacket();
1413 }
1414
1415
1416 template<class TagStore>
1417 void
1418 Cache<TagStore>::MemSidePort::processSendEvent()
1419 {
1420 assert(!waitingOnRetry);
1421 sendPacket();
1422 }
1423
1424
1425 template<class TagStore>
1426 Cache<TagStore>::
1427 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1428 std::vector<Range<Addr> > filterRanges)
1429 : BaseCache::CachePort(_name, _cache, filterRanges)
1430 {
1431 // override default send event from SimpleTimingPort
1432 delete sendEvent;
1433 sendEvent = new SendEvent(this);
1434 }