/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Dave Greene
 *          Nathan Binkert
 *          Steve Reinhardt
 *          Ron Dreslinski
 */

/**
 * @file
 * Cache definitions.
 */

#include "sim/host.hh"
#include "base/misc.hh"

#include "mem/cache/cache.hh"
#include "mem/cache/cache_blk.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/cache/prefetch/base_prefetcher.hh"

#include "sim/sim_exit.hh" // for SimExitEvent

template<class TagStore>
Cache<TagStore>::Cache(const std::string &_name,
                       Cache<TagStore>::Params &params)
    : BaseCache(_name, params.baseParams),
      prefetchAccess(params.prefetchAccess),
      tags(params.tags),
      prefetcher(params.prefetcher),
      doFastWrites(params.doFastWrites),
      prefetchMiss(params.prefetchMiss)
{
    tempBlock = new BlkType();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(_name + "-cpu_side_port", this);
    memSidePort = new MemSidePort(_name + "-mem_side_port", this);
    cpuSidePort->setOtherPort(memSidePort);
    memSidePort->setOtherPort(cpuSidePort);

    tags->setCache(this);
    prefetcher->setCache(this);
}

template<class TagStore>
void
Cache<TagStore>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    prefetcher->regStats(name());
}

template<class TagStore>
Port *
Cache<TagStore>::getPort(const std::string &if_name, int idx)
{
    if (if_name == "" || if_name == "cpu_side") {
        return cpuSidePort;
    } else if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "functional") {
        return new CpuSidePort(name() + "-cpu_side_funcport", this);
    } else {
        panic("Port name %s unrecognized\n", if_name);
    }
}

template<class TagStore>
void
Cache<TagStore>::deletePortRefs(Port *p)
{
    if (cpuSidePort == p || memSidePort == p)
        panic("Can only delete functional ports\n");

    delete p;
}

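// Perform the read-modify-write for a SwapReq: return the current block
// contents to the requester and overwrite the block with the packet's data,
// skipping the write if a conditional swap's comparison value mismatches.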
template<class TagStore>
void
Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
{
    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem)
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
}

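// Complete a CPU-side request (read, write, swap, or upgrade) in place,
// using the data and coherence state of the given valid block.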
template<class TagStore>
void
Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
{
    assert(blk);
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        if (blk->checkWrite(pkt)) {
            blk->status |= BlkDirty;
            pkt->writeDataToBlock(blk->data, blkSize);
        }
    } else if (pkt->isRead()) {
        if (pkt->isLocked()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        if (pkt->getSize() == blkSize) {
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsExclusive()) {
                // on ReadExReq we give up our copy
                tags->invalidateBlk(blk);
            } else {
                // on ReadReq we create shareable copies here and in
                // the requester
                pkt->assertShared();
                blk->status &= ~BlkWritable;
            }
        }
    } else {
        // Not a read or write... must be an upgrade.  It's OK to just
        // ack those as long as we have an exclusive copy at this level.
        assert(pkt->cmd == MemCmd::UpgradeReq);
        tags->invalidateBlk(blk);
    }
}


/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////


template<class TagStore>
void
Cache<TagStore>::markInService(MSHR *mshr)
{
    markInServiceInternal(mshr);
#if 0
    if (mshr->originalCmd == MemCmd::HardPFReq) {
        DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
                name());
        //Also clear pending if need be
        if (!prefetcher->havePending())
        {
            deassertMemSideBusRequest(Request_PF);
        }
    }
#endif
}


template<class TagStore>
void
Cache<TagStore>::squash(int threadNum)
{
    bool unblock = false;
    BlockedCause cause = NUM_BLOCKED_CAUSES;

    if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
        noTargetMSHR = NULL;
        unblock = true;
        cause = Blocked_NoTargets;
    }
    if (mshrQueue.isFull()) {
        unblock = true;
        cause = Blocked_NoMSHRs;
    }
    mshrQueue.squash(threadNum);
    if (unblock && !mshrQueue.isFull()) {
        clearBlocked(cause);
    }
}

/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////

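// Check the tags for the requested block and satisfy the request in place
// when the block is present with sufficient permissions; returns true if
// the request was satisfied here.  Uncacheable requests always miss.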
template<class TagStore>
bool
Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, int &lat)
{
    if (pkt->req->isUncacheable()) {
        blk = NULL;
        lat = hitLatency;
        return false;
    }

    bool satisfied = false; // assume the worst
    blk = tags->findBlock(pkt->getAddr(), lat);

    if (prefetchAccess) {
        // We are determining prefetches on the access stream, so call
        // the prefetcher on every access
        prefetcher->handleMiss(pkt, curTick);
    }

    DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), pkt->getAddr(),
            (blk) ? "hit" : "miss");

    if (blk != NULL) {
        // HIT
        if (blk->isPrefetch()) {
            // Signal that this was a hit under prefetch (we can only
            // get here if the block was brought in by a prefetch and
            // has not been referenced yet)
            DPRINTF(HWPrefetch, "Hit a block that was prefetched\n");
            blk->status &= ~BlkHWPrefetched;
            if (prefetchMiss) {
                // If we are using the miss stream, signal the
                // prefetcher; otherwise the access stream would have
                // already signaled this hit
                prefetcher->handleMiss(pkt, curTick);
            }
        }

        if (pkt->needsExclusive() ? blk->isWritable() : blk->isValid()) {
            // OK to satisfy access
            hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            satisfied = true;
            satisfyCpuSideRequest(pkt, blk);
        } else if (pkt->cmd == MemCmd::Writeback) {
            // special case: writeback to read-only block (e.g., from
            // L1 into L2).  since we're really just passing ownership
            // from one cache to another, we can update this cache to
            // be the owner without making the block writeable
            assert(!blk->isWritable() /* && !blk->isDirty() */);
            assert(blkSize == pkt->getSize());
            std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
            blk->status |= BlkDirty;
            satisfied = true;
            // nothing else to do; writeback doesn't expect response
            assert(!pkt->needsResponse());
        } else {
            // permission violation... nothing to do here, leave unsatisfied
            // for statistics purposes this counts like a complete miss
            incMissCount(pkt);
        }
    } else {
        // complete miss (no matching block)
        incMissCount(pkt);

        if (pkt->isLocked() && pkt->isWrite()) {
            // miss on store conditional... just give up now
            pkt->req->setExtraData(0);
            satisfied = true;
        }
    }

    return satisfied;
}


class ForwardResponseRecord : public Packet::SenderState
{
    Packet::SenderState *prevSenderState;
    int prevSrc;
#ifndef NDEBUG
    BaseCache *cache;
#endif
  public:
    ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
        : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
#ifndef NDEBUG
          , cache(_cache)
#endif
    {}
    void restore(Packet *pkt, BaseCache *_cache)
    {
        assert(_cache == cache);
        pkt->senderState = prevSenderState;
        pkt->setDest(prevSrc);
    }
};

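// Handle a timing-mode packet from the CPU side: forwarded responses,
// mem-inhibited requests, uncacheable accesses, hits, and misses that
// allocate MSHR or write-buffer entries.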
template<class TagStore>
bool
Cache<TagStore>::timingAccess(PacketPtr pkt)
{
    //@todo Add back in MemDebug Calls
    //    MemDebug::cacheAccess(pkt);

    // we charge hitLatency for doing just about anything here
    Tick time = curTick + hitLatency;

    if (pkt->isResponse()) {
        // must be cache-to-cache response from upper to lower level
        ForwardResponseRecord *rec =
            dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
        assert(rec != NULL);
        rec->restore(pkt, this);
        delete rec;
        memSidePort->respond(pkt, time);
        return true;
    }

    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        assert(!pkt->req->isUncacheable());
        // Special tweak for multilevel coherence: snoop downward here
        // on invalidates since there may be other caches below here
        // that have shared copies.  Not necessary if we know that
        // supplier had exclusive copy to begin with.
        if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
            Packet *snoopPkt = new Packet(pkt, true);  // clear flags
            snoopPkt->setExpressSnoop();
            snoopPkt->assertMemInhibit();
            memSidePort->sendTiming(snoopPkt);
            // main memory will delete snoopPkt
        }
        return true;
    }

    if (pkt->req->isUncacheable()) {
        // writes go in write buffer, reads use MSHR
        if (pkt->isWrite() && !pkt->isRead()) {
            allocateWriteBuffer(pkt, time, true);
        } else {
            allocateUncachedReadBuffer(pkt, time, true);
        }
        assert(pkt->needsResponse()); // else we should delete it here??
        return true;
    }

    int lat = hitLatency;
    bool satisfied = false;

    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    MSHR *mshr = mshrQueue.findMatch(blk_addr);

    if (!mshr) {
        // no outstanding access to this block, look up in cache
        // (otherwise if we allow reads while there's an outstanding
        // write miss, the read could return stale data out of the
        // cache block... a more aggressive system could detect the
        // overlap (if any) and forward data out of the MSHRs, but we
        // don't do that yet)
        BlkType *blk = NULL;
        satisfied = access(pkt, blk, lat);
    }

#if 0
    PacketList writebacks;

    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    /** @todo make the fast write alloc (wh64) work with coherence. */
    /** @todo Do we want to do fast writes for writebacks as well? */
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == MemCmd::WriteReq
         || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
        // not outstanding misses, can do this
        MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
        if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fastallocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
                             writebacks);
            ++fastWrites;
        }
    }

    // copy writebacks to write buffer
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        allocateWriteBuffer(wbPkt, time, true);
        writebacks.pop_front();
    }
#endif

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        if (needsResponse) {
            pkt->makeTimingResponse();
            cpuSidePort->respond(pkt, curTick+lat);
        } else {
            delete pkt;
        }
    } else {
        // miss
        if (prefetchMiss)
            prefetcher->handleMiss(pkt, time);

        if (mshr) {
            // MSHR hit
            //@todo remove hw_pf here
            mshr_hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
                mshr->threadNum = -1;
            }
            mshr->allocateTarget(pkt, time, order++);
            if (mshr->getNumTargets() == numTarget) {
                noTargetMSHR = mshr;
                setBlocked(Blocked_NoTargets);
                // need to be careful with this... if this mshr isn't
                // ready yet (i.e. time > curTick), we don't want to
                // move it ahead of mshrs that are ready
                // mshrQueue.moveToFront(mshr);
            }
        } else {
            // no MSHR
            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            // always mark as cache fill for now... if we implement
            // no-write-allocate or bypass accesses this will have to
            // be changed.
            if (pkt->cmd == MemCmd::Writeback) {
                allocateWriteBuffer(pkt, time, true);
            } else {
                allocateMissBuffer(pkt, time, true);
            }
        }
    }

    return true;
}

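// Build the packet to send toward memory for a miss: an UpgradeReq when we
// hold a shared copy and need exclusive access, otherwise a ReadReq or
// ReadExReq.  Returns NULL when no fill will occur and the original packet
// should simply be forwarded.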
template<class TagStore>
PacketPtr
Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
                              bool needsExclusive)
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        assert(blk == NULL);
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->cmd == MemCmd::Writeback ||
         cpu_pkt->cmd == MemCmd::UpgradeReq)) {
        // For now, writebacks from upper-level caches that
        // completely miss in the cache just go through.  If we had
        // "fast write" support (where we could write the whole
        // block w/o fetching new data) we might want to allocate
        // on writeback misses instead.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is shared
        // (read-only) and we need exclusive
        assert(needsExclusive && !blk->isWritable());
        cmd = MemCmd::UpgradeReq;
    } else {
        // block is invalid
        cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);

    pkt->allocate();
    return pkt;
}

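// Atomic-mode access path: performs the lookup, sends any required bus
// packet and writebacks synchronously, and returns the total latency.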
template<class TagStore>
Tick
Cache<TagStore>::atomicAccess(PacketPtr pkt)
{
    int lat = hitLatency;

    // @TODO: make this a parameter
    bool last_level_cache = false;

    if (pkt->memInhibitAsserted()) {
        assert(!pkt->req->isUncacheable());
        // have to invalidate ourselves and any lower caches even if
        // upper cache will be responding
        if (pkt->isInvalidate()) {
            BlkType *blk = tags->findBlock(pkt->getAddr());
            if (blk && blk->isValid()) {
                tags->invalidateBlk(blk);
                DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
                        pkt->cmdString(), pkt->getAddr());
            }
            if (!last_level_cache) {
                DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
                        pkt->cmdString(), pkt->getAddr());
                lat += memSidePort->sendAtomic(pkt);
            }
        } else {
            DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat;
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    BlkType *blk = NULL;

    if (!access(pkt, blk, lat)) {
        // MISS
        PacketPtr busPkt = getBusPacket(pkt, blk, pkt->needsExclusive());

        bool isCacheFill = (busPkt != NULL);

        if (busPkt == NULL) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            busPkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %x\n",
                busPkt->cmdString(), busPkt->getAddr());

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += memSidePort->sendAtomic(busPkt);

        DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
                busPkt->cmdString(), busPkt->getAddr(), old_state);

        if (isCacheFill) {
            PacketList writebacks;
            blk = handleFill(busPkt, blk, writebacks);
            satisfyCpuSideRequest(pkt, blk);
            delete busPkt;

            // Handle writebacks if needed
            while (!writebacks.empty()) {
                PacketPtr wbPkt = writebacks.front();
                memSidePort->sendAtomic(wbPkt);
                writebacks.pop_front();
                delete wbPkt;
            }
        }
    }

    // We now have the block one way or another (hit or completed miss)

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat;
}

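// Functional access: satisfy the packet from the block, an MSHR, or the
// write buffer if possible, otherwise forward it out the other port.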
template<class TagStore>
void
Cache<TagStore>::functionalAccess(PacketPtr pkt,
                                  CachePort *otherSidePort)
{
    Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
    BlkType *blk = tags->findBlock(pkt->getAddr());

    if (blk && pkt->checkFunctional(blk_addr, blkSize, blk->data)) {
        // request satisfied from block
        return;
    }

    // Need to check for outstanding misses and writes; if neither one
    // satisfies, then forward to other side of cache.
    if (!(mshrQueue.checkFunctional(pkt, blk_addr) ||
          writeBuffer.checkFunctional(pkt, blk_addr))) {
        otherSidePort->checkAndSendFunctional(pkt);
    }
}


/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////

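// Process a response from the memory side: update statistics, fill the
// block if this was a cache fill, service all of the MSHR's targets, and
// then retire or re-arm the MSHR.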
template<class TagStore>
void
Cache<TagStore>::handleResponse(PacketPtr pkt)
{
    Tick time = curTick + hitLatency;
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
    assert(mshr);

    if (pkt->wasNacked()) {
        //pkt->reinitFromRequest();
        warn("NACKs from devices not connected to the same bus "
             "not implemented\n");
        return;
    }
    assert(!pkt->isError());
    DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());

    MSHRQueue *mq = mshr->queue;
    bool wasFull = mq->isFull();

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = NULL;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    BlkType *blk = tags->findBlock(pkt->getAddr());
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick - initial_tgt->recvTime;
    PacketList writebacks;

    if (pkt->req->isUncacheable()) {
        mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
            miss_latency;
    } else {
        mshr_miss_latency[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
            miss_latency;
    }

    if (mshr->isCacheFill) {
        DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
                pkt->getAddr());

        // give mshr a chance to do some dirty work
        mshr->handleFill(pkt, blk);

        blk = handleFill(pkt, blk, writebacks);
        assert(blk != NULL);
    }

    // First offset for critical word first calculations
    int initial_offset = 0;

    if (mshr->hasTargets()) {
        initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
    }

    while (mshr->hasTargets()) {
        MSHR::Target *target = mshr->getTarget();

        if (target->isCpuSide()) {
            Tick completion_time;
            if (blk != NULL) {
                satisfyCpuSideRequest(target->pkt, blk);
                // How many bytes past the first request is this one
                int transfer_offset =
                    target->pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If critical word (no offset), return first word time;
                // note the parentheses are required since '+' binds more
                // tightly than '?:'
                completion_time = tags->getHitLatency() +
                    (transfer_offset ? pkt->finishTime : pkt->firstWordTime);

                assert(!target->pkt->req->isUncacheable());
                missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
                    completion_time - target->recvTime;
            } else {
                // not a cache fill, just forwarding response
                completion_time = tags->getHitLatency() + pkt->finishTime;
                if (pkt->isRead()) {
                    target->pkt->setData(pkt->getPtr<uint8_t>());
                }
            }
            target->pkt->makeTimingResponse();
            cpuSidePort->respond(target->pkt, completion_time);
        } else {
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            handleSnoop(target->pkt, blk, true, true);
        }

        mshr->popTarget();
    }

    if (mshr->promoteDeferredTargets()) {
        MSHRQueue *mq = mshr->queue;
        mq->markPending(mshr);
        requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
    } else {
        mq->deallocate(mshr);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    // copy writebacks to write buffer
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        allocateWriteBuffer(wbPkt, time, true);
        writebacks.pop_front();
    }
    // if we used temp block, clear it out
    if (blk == tempBlock) {
        if (blk->isDirty()) {
            allocateWriteBuffer(writebackBlk(blk), time, true);
        }
        tags->invalidateBlk(blk);
    }

    delete pkt;
}


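// Create a Writeback packet carrying the dirty block's data and clear the
// block's dirty bit; the caller is responsible for sending the packet.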
template<class TagStore>
PacketPtr
Cache<TagStore>::writebackBlk(BlkType *blk)
{
    assert(blk && blk->isValid() && blk->isDirty());

    writebacks[0/*pkt->req->getThreadNum()*/]++;

    Request *writebackReq =
        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
    PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
    writeback->allocate();
    std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);

    blk->status &= ~BlkDirty;
    return writeback;
}


// Note that the reason we return a list of writebacks rather than
// inserting them directly in the write buffer is that this function
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
template<class TagStore>
typename Cache<TagStore>::BlkType*
Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
                            PacketList &writebacks)
{
    Addr addr = pkt->getAddr();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    if (blk == NULL) {
        // better have read new data...
        assert(pkt->isRead());

        // need to do a replacement
        blk = tags->findReplacement(addr, writebacks);
        if (blk->isValid()) {
            Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
            if (repl_mshr) {
                // must be an outstanding upgrade request on block
                // we're about to replace...
                assert(!blk->isWritable());
                assert(repl_mshr->needsExclusive());
                // too hard to replace block with transient state;
                // just use temporary storage to complete the current
                // request and then get rid of it
                assert(!tempBlock->isValid());
                blk = tempBlock;
                tempBlock->set = tags->extractSet(addr);
                DPRINTF(Cache, "using temp block for %x\n", addr);
            } else {
                DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
                        repl_addr, addr,
                        blk->isDirty() ? "writeback" : "clean");

                if (blk->isDirty()) {
                    // Save writeback packet for handling by caller
                    writebacks.push_back(writebackBlk(blk));
                }
            }
        }

        blk->tag = tags->extractTag(addr);
    } else {
        // existing block... probably an upgrade
        assert(blk->tag == tags->extractTag(addr));
        // either we're getting new data or the block should already be valid
        assert(pkt->isRead() || blk->isValid());
    }

    if (pkt->needsExclusive() || !pkt->sharedAsserted()) {
        blk->status = BlkValid | BlkWritable;
    } else {
        blk->status = BlkValid;
    }

    DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
            addr, old_state, blk->status);

    // if we got new data, copy it in
    if (pkt->isRead()) {
        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
    }

    blk->whenReady = pkt->finishTime;

    return blk;
}


/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////

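// Build and send a timing-mode snoop response carrying the supplied block
// data back out the memory-side port.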
template<class TagStore>
void
Cache<TagStore>::doTimingSupplyResponse(PacketPtr req_pkt,
                                        uint8_t *blk_data,
                                        bool already_copied)
{
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt, true);
    if (!req_pkt->isInvalidate()) {
        // note that we're ignoring the shared flag on req_pkt... it's
        // basically irrelevant, as we'll always assert shared unless
        // it's an exclusive request, in which case the shared line
        // should never be asserted
        pkt->assertShared();
    }
    pkt->allocate();
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    memSidePort->respond(pkt, curTick + hitLatency);
}

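// Core snoop handling shared by the timing and atomic paths: propagate the
// snoop upward, then update our own block state and respond with data if
// we hold a dirty copy.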
template<class TagStore>
void
Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
                             bool is_timing, bool is_deferred)
{
    assert(pkt->isRequest());

    // first propagate snoop upward to see if anyone above us wants to
    // handle it.  save & restore packet src since it will get
    // rewritten to be relative to cpu-side bus (if any)
    bool alreadyResponded = pkt->memInhibitAsserted();
    if (is_timing) {
        Packet *snoopPkt = new Packet(pkt, true);  // clear flags
        snoopPkt->setExpressSnoop();
        snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
        cpuSidePort->sendTiming(snoopPkt);
        if (snoopPkt->memInhibitAsserted()) {
            // cache-to-cache response from some upper cache
            assert(!alreadyResponded);
            pkt->assertMemInhibit();
        } else {
            delete snoopPkt->senderState;
        }
        if (snoopPkt->sharedAsserted()) {
            pkt->assertShared();
        }
        delete snoopPkt;
    } else {
        int origSrc = pkt->getSrc();
        cpuSidePort->sendAtomic(pkt);
        if (!alreadyResponded && pkt->memInhibitAsserted()) {
            // cache-to-cache response from some upper cache:
            // forward response to original requester
            assert(pkt->isResponse());
        }
        pkt->setSrc(origSrc);
    }

    if (!blk || !blk->isValid()) {
        return;
    }

    // we may end up modifying both the block state and the packet (if
    // we respond in atomic mode), so just figure out what to do now
    // and then do it later
    bool respond = blk->isDirty() && pkt->needsResponse();
    bool have_exclusive = blk->isWritable();
    bool invalidate = pkt->isInvalidate();

    if (pkt->isRead() && !pkt->isInvalidate()) {
        assert(!pkt->needsExclusive());
        pkt->assertShared();
        int bits_to_clear = BlkWritable;
        const bool haveOwnershipState = true; // for now
        if (!haveOwnershipState) {
            // if we don't support pure ownership (dirty && !writable),
            // have to clear dirty bit here, assume memory snarfs data
            // on cache-to-cache xfer
            bits_to_clear |= BlkDirty;
        }
        blk->status &= ~bits_to_clear;
    }

    if (respond) {
        assert(!pkt->memInhibitAsserted());
        pkt->assertMemInhibit();
        if (have_exclusive) {
            pkt->setSupplyExclusive();
        }
        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred);
        } else {
            pkt->makeAtomicResponse();
            pkt->setDataFromBlock(blk->data, blkSize);
        }
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (invalidate) {
        tags->invalidateBlk(blk);
    }

    DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
            pkt->cmdString(), blockAlign(pkt->getAddr()),
            respond ? "responding, " : "", blk->status);
}

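// Timing-mode snoop entry point: let an in-service MSHR defer the snoop if
// it can, supply data from a matching writeback-buffer entry, and then
// perform the regular block snoop.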
template<class TagStore>
void
Cache<TagStore>::snoopTiming(PacketPtr pkt)
{
    // Note that some deferred snoops don't have requests, since the
    // original access may have already completed
    if ((pkt->req && pkt->req->isUncacheable()) ||
        pkt->cmd == MemCmd::Writeback) {
        // Can't get a hit on an uncacheable address
        // Revisit this for multi-level coherence
        return;
    }

    BlkType *blk = tags->findBlock(pkt->getAddr());

    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
    MSHR *mshr = mshrQueue.findMatch(blk_addr);

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
                blk_addr);
        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    std::vector<MSHR *> writebacks;
    if (writeBuffer.findMatches(blk_addr, writebacks)) {
        DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
                pkt->getAddr());

        // Look through writebacks for any non-uncacheable writes, use that
        for (int i = 0; i < writebacks.size(); i++) {
            mshr = writebacks[i];
            assert(!mshr->isUncacheable());
            assert(mshr->getNumTargets() == 1);
            PacketPtr wb_pkt = mshr->getTarget()->pkt;
            assert(wb_pkt->cmd == MemCmd::Writeback);

            assert(!pkt->memInhibitAsserted());
            pkt->assertMemInhibit();
            if (!pkt->needsExclusive()) {
                pkt->assertShared();
            } else {
                // if we're not asserting the shared line, we need to
                // invalidate our copy.  we'll do that below as long as
                // the packet's invalidate flag is set...
                assert(pkt->isInvalidate());
            }
            doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(), false);

            if (pkt->isInvalidate()) {
                // Invalidation trumps our writeback... discard here
                markInService(mshr);
            }

            // If this was a shared writeback, there may still be
            // other shared copies above that require invalidation.
            // We could be more selective and return here if the
            // request is non-exclusive or if the writeback is
            // exclusive.
            break;
        }
    }

    handleSnoop(pkt, blk, true, false);
}


template<class TagStore>
Tick
Cache<TagStore>::snoopAtomic(PacketPtr pkt)
{
    if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
        // Can't get a hit on an uncacheable address
        // Revisit this for multi-level coherence
        return hitLatency;
    }

    BlkType *blk = tags->findBlock(pkt->getAddr());
    handleSnoop(pkt, blk, false, false);
    return hitLatency;
}

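// Choose the next MSHR to service, arbitrating between the miss queue and
// the write buffer and falling back to a prefetch if both are idle.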
template<class TagStore>
MSHR *
Cache<TagStore>::getNextMSHR()
{
    // Check both MSHR queue and write buffer for potential requests
    MSHR *miss_mshr = mshrQueue.getNextMSHR();
    MSHR *write_mshr = writeBuffer.getNextMSHR();

    // Now figure out which one to send... some cases are easy
    if (miss_mshr && !write_mshr) {
        return miss_mshr;
    }
    if (write_mshr && !miss_mshr) {
        return write_mshr;
    }

    if (miss_mshr && write_mshr) {
        // We have one of each... normally we favor the miss request
        // unless the write buffer is full
        if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
            // Write buffer is full, so we'd like to issue a write;
            // need to search MSHR queue for conflicting earlier miss.
            MSHR *conflict_mshr =
                mshrQueue.findPending(write_mshr->addr, write_mshr->size);

            if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
                // Service misses in order until conflict is cleared.
                return conflict_mshr;
            }

            // No conflicts; issue write
            return write_mshr;
        }

        // Write buffer isn't full, but need to check it for
        // conflicting earlier writeback
        MSHR *conflict_mshr =
            writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
        if (conflict_mshr) {
            // not sure why we don't check order here... it was in the
            // original code but commented out.

            // The only way this happens is if we are
            // doing a write and we didn't have permissions
            // then subsequently saw a writeback (owned got evicted)
            // We need to make sure to perform the writeback first
            // To preserve the dirty data, then we can issue the write

            // should we return write_mshr here instead?  I.e. do we
            // have to flush writes in order?  I don't think so... not
            // for Alpha anyway.  Maybe for x86?
            return conflict_mshr;
        }

        // No conflicts; issue read
        return miss_mshr;
    }

    // fall through... no pending requests.  Try a prefetch.
    assert(!miss_mshr && !write_mshr);
    if (!mshrQueue.isFull()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            // Update statistic on number of prefetches issued
            // (hwpf_mshr_misses)
            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
            // Don't request bus, since we already have it
            return allocateMissBuffer(pkt, curTick, false);
        }
    }

    return NULL;
}

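// Produce the next packet to send on the memory-side bus for the chosen
// MSHR, either forwarding the target packet or building a new bus packet.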
template<class TagStore>
PacketPtr
Cache<TagStore>::getTimingPacket()
{
    MSHR *mshr = getNextMSHR();

    if (mshr == NULL) {
        return NULL;
    }

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
    PacketPtr pkt = NULL;

    if (mshr->isSimpleForward()) {
        // no response expected, just forward packet as it is
        assert(tags->findBlock(mshr->addr) == NULL);
        pkt = tgt_pkt;
    } else {
        BlkType *blk = tags->findBlock(mshr->addr);
        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());

        mshr->isCacheFill = (pkt != NULL);

        if (pkt == NULL) {
            // not a cache block request, but a response is expected
            assert(!mshr->isSimpleForward());
            // make copy of current packet to forward, keep current
            // copy for response handling
            pkt = new Packet(tgt_pkt);
            pkt->allocate();
            if (pkt->isWrite()) {
                pkt->setData(tgt_pkt->getPtr<uint8_t>());
            }
        }
    }

    assert(pkt != NULL);
    pkt->senderState = mshr;
    return pkt;
}


///////////////
//
// CpuSidePort
//
///////////////

template<class TagStore>
void
Cache<TagStore>::CpuSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    // CPU side port doesn't snoop; it's a target only.
    bool dummy;
    otherPort->getPeerAddressRanges(resp, dummy);
    snoop = false;
}


template<class TagStore>
bool
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
{
    // illegal to block responses... can lead to deadlock
    if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
        DPRINTF(Cache,"Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }

    myCache()->timingAccess(pkt);
    return true;
}


template<class TagStore>
Tick
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    return myCache()->atomicAccess(pkt);
}


template<class TagStore>
void
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    if (!checkFunctional(pkt)) {
        myCache()->functionalAccess(pkt, cache->memSidePort);
    }
}


template<class TagStore>
Cache<TagStore>::
CpuSidePort::CpuSidePort(const std::string &_name,
                         Cache<TagStore> *_cache)
    : BaseCache::CachePort(_name, _cache)
{
}

///////////////
//
// MemSidePort
//
///////////////

template<class TagStore>
void
Cache<TagStore>::MemSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    otherPort->getPeerAddressRanges(resp, snoop);
    // Memory-side port always snoops, so unconditionally set flag for
    // caller.
    snoop = true;
}


template<class TagStore>
bool
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
{
    // this needs to be fixed so that the cache updates the mshr and sends the
    // packet back out on the link, but it probably won't happen so until this
    // gets fixed, just panic when it does
    if (pkt->wasNacked())
        panic("Need to implement cache resending nacked packets!\n");

    if (pkt->isRequest() && blocked) {
        DPRINTF(Cache,"Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }

    if (pkt->isResponse()) {
        myCache()->handleResponse(pkt);
    } else {
        myCache()->snoopTiming(pkt);
    }
    return true;
}


template<class TagStore>
Tick
Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
{
    // in atomic mode, responses go back to the sender via the
    // function return from sendAtomic(), not via a separate
    // sendAtomic() from the responder.  Thus we should never see a
    // response packet in recvAtomic() (anywhere, not just here).
    assert(!pkt->isResponse());
    return myCache()->snoopAtomic(pkt);
}


template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
{
    if (!checkFunctional(pkt)) {
        myCache()->functionalAccess(pkt, cache->cpuSidePort);
    }
}


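// Try to send the next memory-side packet (queued responses first, then
// requests and writebacks) and schedule a follow-up send or signal drain
// completion as appropriate.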
template<class TagStore>
void
Cache<TagStore>::MemSidePort::sendPacket()
{
    // if we have responses that are ready, they take precedence
    if (deferredPacketReady()) {
        bool success = sendTiming(transmitList.front().pkt);

        if (success) {
            //send successful, remove packet
            transmitList.pop_front();
        }

        waitingOnRetry = !success;
    } else {
        // check for non-response packets (requests & writebacks)
        PacketPtr pkt = myCache()->getTimingPacket();
        if (pkt == NULL) {
            // can happen if e.g. we attempt a writeback and fail, but
            // before the retry, the writeback is eliminated because
            // we snoop another cache's ReadEx.
            waitingOnRetry = false;
        } else {
            MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);

            bool success = sendTiming(pkt);
            DPRINTF(CachePort,
                    "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "successful" : "unsuccessful");

            waitingOnRetry = !success;
            if (waitingOnRetry) {
                DPRINTF(CachePort, "now waiting on a retry\n");
                if (!mshr->isSimpleForward()) {
                    delete pkt;
                }
            } else {
                myCache()->markInService(mshr);
            }
        }
    }


    // tried to send packet... if it was successful (no retry), see if
    // we need to rerequest bus or not
    if (!waitingOnRetry) {
        Tick nextReady = std::min(deferredPacketReadyTime(),
                                  myCache()->nextMSHRReadyTime());
        // @TODO: need to factor in prefetch requests here somehow
        if (nextReady != MaxTick) {
            DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
            sendEvent->schedule(std::max(nextReady, curTick + 1));
        } else {
            // no more to send right now: if we're draining, we may be done
            if (drainEvent) {
                drainEvent->process();
                drainEvent = NULL;
            }
        }
    }
}

template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvRetry()
{
    assert(waitingOnRetry);
    sendPacket();
}


template<class TagStore>
void
Cache<TagStore>::MemSidePort::processSendEvent()
{
    assert(!waitingOnRetry);
    sendPacket();
}


template<class TagStore>
Cache<TagStore>::
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache)
    : BaseCache::CachePort(_name, _cache)
{
    // override default send event from SimpleTimingPort
    delete sendEvent;
    sendEvent = new SendEvent(this);
}