2 * Copyright (c) 2010 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * Authors: Erik Hallnor
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "mem/cache/blk.hh"
58 #include "mem/cache/cache.hh"
59 #include "mem/cache/mshr.hh"
60 #include "mem/cache/prefetch/base.hh"
61 #include "sim/sim_exit.hh"
// Cache constructor: allocates the temporary fill block (used to
// complete a fill when no replaceable block is available), creates
// the CPU-side and memory-side ports, cross-links them, and — when a
// prefetcher is configured — registers this cache with it.
63 template<class TagStore>
64 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
69 prefetchOnAccess(p->prefetch_on_access)
71 tempBlock = new BlkType();
// tempBlock lives outside the tag array, so it needs its own backing store
72 tempBlock->data = new uint8_t[blkSize];
74 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
76 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
// each port forwards traffic via its peer on the other side
78 cpuSidePort->setOtherPort(memSidePort);
79 memSidePort->setOtherPort(cpuSidePort);
// give the prefetcher a back-pointer so it can issue requests through us
83 prefetcher->setCache(this);
// Register statistics: delegate to the base cache, then the tag
// store, and (when present) the prefetcher, all under this cache's
// name.
86 template<class TagStore>
88 Cache<TagStore>::regStats()
90 BaseCache::regStats();
91 tags->regStats(name());
93 prefetcher->regStats(name());
// Resolve a port by name: "" or "cpu_side" selects the CPU-side
// port, "mem_side" the memory-side port; "functional" allocates a
// fresh CPU-side port for functional (debug) accesses and wires it
// to the memory side.  Any other name is a fatal error.
96 template<class TagStore>
98 Cache<TagStore>::getPort(const std::string &if_name, int idx)
100 if (if_name == "" || if_name == "cpu_side") {
102 } else if (if_name == "mem_side") {
104 } else if (if_name == "functional") {
105 CpuSidePort *funcPort =
106 new CpuSidePort(name() + "-cpu_side_funcport", this,
108 funcPort->setOtherPort(memSidePort);
111 panic("Port name %s unrecognized\n", if_name);
// Delete references to a dynamically created (functional) port.  The
// permanent CPU-side and memory-side ports must never be deleted.
115 template<class TagStore>
117 Cache<TagStore>::deletePortRefs(Port *p)
119 if (cpuSidePort == p || memSidePort == p)
120 panic("Can only delete functional ports\n");
// Perform an atomic swap or compare-and-swap on a cached block: save
// the packet's write value aside, return the current block contents
// in the packet, and overwrite the block (marking it dirty) only if
// the swap is unconditional or the condition value matches.
// Supports 32- and 64-bit conditional operations only.
126 template<class TagStore>
128 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
130 uint64_t overwrite_val;
132 uint64_t condition_val64;
133 uint32_t condition_val32;
// locate the accessed word within the block
135 int offset = tags->extractBlkOffset(pkt->getAddr());
136 uint8_t *blk_data = blk->data + offset;
138 assert(sizeof(uint64_t) >= pkt->getSize());
140 overwrite_mem = true;
141 // keep a copy of our possible write value, and copy what is at the
142 // memory address into the packet
143 pkt->writeData((uint8_t *)&overwrite_val);
144 pkt->setData(blk_data);
146 if (pkt->req->isCondSwap()) {
147 if (pkt->getSize() == sizeof(uint64_t)) {
148 condition_val64 = pkt->req->getExtraData();
// memcmp == 0 means the condition matched, so the write proceeds
149 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
151 } else if (pkt->getSize() == sizeof(uint32_t)) {
152 condition_val32 = (uint32_t)pkt->req->getExtraData();
153 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
156 panic("Invalid size for conditional read/write\n");
160 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
161 blk->status |= BlkDirty;
// Satisfy a CPU-side request (swap, write, read, or upgrade) from a
// valid block, updating coherence status bits as required.
// deferred_response: we are responding after our own miss completed;
// pending_downgrade: a snoop from below will demote our copy, so we
// must not hand out exclusivity.
166 template<class TagStore>
168 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
169 bool deferred_response,
170 bool pending_downgrade)
172 assert(blk && blk->isValid());
173 // Occasionally this is not true... if we are a lower-level cache
174 // satisfying a string of Read and ReadEx requests from
175 // upper-level caches, a Read will mark the block as shared but we
176 // can satisfy a following ReadEx anyway since we can rely on the
177 // Read requester(s) to have buffered the ReadEx snoop and to
178 // invalidate their blocks after receiving them.
179 // assert(!pkt->needsExclusive() || blk->isWritable());
180 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
182 // Check RMW operations first since both isRead() and
183 // isWrite() will be true for them
184 if (pkt->cmd == MemCmd::SwapReq) {
185 cmpAndSwap(blk, pkt);
186 } else if (pkt->isWrite()) {
// checkWrite() validates load-locked/store-conditional semantics
187 if (blk->checkWrite(pkt)) {
188 pkt->writeDataToBlock(blk->data, blkSize);
189 blk->status |= BlkDirty;
191 } else if (pkt->isRead()) {
193 blk->trackLoadLocked(pkt);
195 pkt->setDataFromBlock(blk->data, blkSize);
196 if (pkt->getSize() == blkSize) {
197 // special handling for coherent block requests from
198 // upper-level caches
199 if (pkt->needsExclusive()) {
200 // if we have a dirty copy, make sure the recipient
201 // keeps it marked dirty
202 if (blk->isDirty()) {
203 pkt->assertMemInhibit();
205 // on ReadExReq we give up our copy unconditionally
206 tags->invalidateBlk(blk);
207 } else if (blk->isWritable() && !pending_downgrade
208 && !pkt->sharedAsserted()) {
209 // we can give the requester an exclusive copy (by not
210 // asserting shared line) on a read request if:
211 // - we have an exclusive copy at this level (& below)
212 // - we don't have a pending snoop from below
213 // signaling another read request
214 // - no other cache above has a copy (otherwise it
215 // would have asserted shared line on request)
217 if (blk->isDirty()) {
218 // special considerations if we're owner:
219 if (!deferred_response) {
220 // if we are responding immediately and can
221 // signal that we're transferring ownership
222 // along with exclusivity, do so
223 pkt->assertMemInhibit();
224 blk->status &= ~BlkDirty;
226 // if we're responding after our own miss,
227 // there's a window where the recipient didn't
228 // know it was getting ownership and may not
229 // have responded to snoops correctly, so we
230 // can't pass off ownership *or* exclusivity
235 // otherwise only respond with a shared copy
240 // Not a read or write... must be an upgrade. it's OK
241 // to just ack those as long as we have an exclusive
242 // copy at this level.
243 assert(pkt->isUpgrade());
244 tags->invalidateBlk(blk);
249 /////////////////////////////////////////////////////
251 // MSHR helper functions
253 /////////////////////////////////////////////////////
// Mark an MSHR as in service on the memory-side bus.  If it was a
// hardware prefetch and the prefetcher has no more pending requests,
// withdraw our prefetch request for the memory-side bus.
256 template<class TagStore>
258 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
260 markInServiceInternal(mshr, pkt);
262 if (mshr->originalCmd == MemCmd::HardPFReq) {
263 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
265 //Also clear pending if need be
266 if (!prefetcher->havePending())
268 deassertMemSideBusRequest(Request_PF);
// Squash all outstanding MSHRs belonging to the given thread.  If
// the squash removes the condition that blocked the cache (no
// targets on the no-target MSHR, or a full MSHR queue), unblock it.
275 template<class TagStore>
277 Cache<TagStore>::squash(int threadNum)
279 bool unblock = false;
280 BlockedCause cause = NUM_BLOCKED_CAUSES;
// blocked because the no-target MSHR belongs to this thread
282 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
285 cause = Blocked_NoTargets;
287 if (mshrQueue.isFull()) {
289 cause = Blocked_NoMSHRs;
291 mshrQueue.squash(threadNum);
292 if (unblock && !mshrQueue.isFull()) {
297 /////////////////////////////////////////////////////
299 // Access path: requests coming in from the CPU side
301 /////////////////////////////////////////////////////
// access(): the common lookup on the CPU-side path, shared by timing
// and atomic modes.  Returns whether the request was satisfied here;
// sets blk and lat, and may append writeback packets produced by a
// replacement.  Uncacheable requests bypass the cache (invalidating
// any matching block); Writebacks are accepted even without a
// writable copy.
303 template<class TagStore>
305 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
306 int &lat, PacketList &writebacks)
308 if (pkt->req->isUncacheable()) {
// a Clrex must still clear monitors / drop any cached copy
309 if (pkt->req->isClrex()) {
312 blk = tags->findBlock(pkt->getAddr());
314 tags->invalidateBlk(blk);
323 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
324 blk = tags->accessBlock(pkt->getAddr(), lat, id);
326 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
327 pkt->req->isInstFetch() ? " (ifetch)" : "",
328 pkt->getAddr(), (blk) ? "hit" : "miss");
// a hit only counts if our copy's permissions match the request
332 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
333 // OK to satisfy access
334 incHitCount(pkt, id);
335 satisfyCpuSideRequest(pkt, blk);
340 // Can't satisfy access normally... either no block (blk == NULL)
341 // or have block but need exclusive & only have shared.
343 // Writeback handling is special case. We can write the block
344 // into the cache without having a writeable copy (or any copy at
346 if (pkt->cmd == MemCmd::Writeback) {
347 assert(blkSize == pkt->getSize());
349 // need to do a replacement
350 blk = allocateBlock(pkt->getAddr(), writebacks);
352 // no replaceable block available, give up.
353 // writeback will be forwarded to next level.
354 incMissCount(pkt, id);
357 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
358 tags->insertBlock(pkt->getAddr(), blk, id);
359 blk->status = BlkValid | BlkReadable;
361 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
362 blk->status |= BlkDirty;
363 // nothing else to do; writeback doesn't expect response
364 assert(!pkt->needsResponse());
365 incHitCount(pkt, id);
369 incMissCount(pkt, id);
371 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
372 // complete miss on store conditional... just give up now
373 pkt->req->setExtraData(0);
// Sender-state record attached to a snoop packet that is forwarded
// up the hierarchy, so that when the cache-to-cache response comes
// back we can restore the packet's original senderState and route it
// back to the original source.
381 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
// previous senderState, saved so restore() can re-chain it
383 Packet::SenderState *prevSenderState;
389 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
390 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
// restore the saved state and redirect the response to its origin
395 void restore(Packet *pkt, BaseCache *_cache)
397 assert(_cache == cache);
398 pkt->senderState = prevSenderState;
399 pkt->setDest(prevSrc);
// Handle a timing-mode packet arriving on the CPU side.  Covers four
// cases: (1) a forwarded cache-to-cache response heading back down,
// (2) a request whose supplier has already asserted mem-inhibit
// (snoop downward for invalidates, then drop), (3) uncacheable
// requests routed straight to the write buffer / uncached read
// buffer, and (4) normal accesses: respond on a hit, otherwise
// attach to an existing MSHR or allocate a new miss/write-buffer
// entry.  Finally schedules any prefetch and queues writebacks.
404 template<class TagStore>
406 Cache<TagStore>::timingAccess(PacketPtr pkt)
408 //@todo Add back in MemDebug Calls
409 // MemDebug::cacheAccess(pkt);
411 // we charge hitLatency for doing just about anything here
412 Tick time = curTick + hitLatency;
414 if (pkt->isResponse()) {
415 // must be cache-to-cache response from upper to lower level
416 ForwardResponseRecord *rec =
417 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
419 rec->restore(pkt, this);
421 memSidePort->respond(pkt, time);
425 assert(pkt->isRequest());
427 if (pkt->memInhibitAsserted()) {
428 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
430 assert(!pkt->req->isUncacheable());
431 // Special tweak for multilevel coherence: snoop downward here
432 // on invalidates since there may be other caches below here
433 // that have shared copies. Not necessary if we know that
434 // supplier had exclusive copy to begin with.
435 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
436 Packet *snoopPkt = new Packet(pkt, true); // clear flags
437 snoopPkt->setExpressSnoop();
438 snoopPkt->assertMemInhibit();
439 memSidePort->sendTiming(snoopPkt);
440 // main memory will delete snoopPkt
442 // since we're the official target but we aren't responding,
443 // delete the packet now.
448 if (pkt->req->isUncacheable()) {
449 if (pkt->req->isClrex()) {
452 BlkType *blk = tags->findBlock(pkt->getAddr());
454 tags->invalidateBlk(blk);
458 // writes go in write buffer, reads use MSHR
459 if (pkt->isWrite() && !pkt->isRead()) {
460 allocateWriteBuffer(pkt, time, true);
462 allocateUncachedReadBuffer(pkt, time, true);
464 assert(pkt->needsResponse()); // else we should delete it here??
468 int lat = hitLatency;
470 PacketList writebacks;
// perform the actual cache lookup / hit handling
472 bool satisfied = access(pkt, blk, lat, writebacks);
475 /** @todo make the fast write alloc (wh64) work with coherence. */
477 // If this is a block size write/hint (WH64) allocate the block here
478 // if the coherence protocol allows it.
479 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
480 (pkt->cmd == MemCmd::WriteReq
481 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
482 // not outstanding misses, can do this
483 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
484 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
485 if (outstanding_miss) {
486 warn("WriteInv doing a fastallocate"
487 "with an outstanding miss to the same address\n");
489 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
496 // track time of availability of next prefetch, if any
497 Tick next_pf_time = 0;
499 bool needsResponse = pkt->needsResponse();
503 pkt->makeTimingResponse();
504 cpuSidePort->respond(pkt, curTick+lat);
// notify the prefetcher on a demand hit to a prefetched block (or
// on every access if prefetch_on_access is set)
509 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
511 blk->status &= ~BlkHWPrefetched;
512 next_pf_time = prefetcher->notify(pkt, time);
// miss path: coalesce into a matching MSHR if one exists
517 Addr blk_addr = blockAlign(pkt->getAddr());
518 MSHR *mshr = mshrQueue.findMatch(blk_addr);
522 //@todo remove hw_pf here
523 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
524 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
525 mshr->threadNum = -1;
527 mshr->allocateTarget(pkt, time, order++);
528 if (mshr->getNumTargets() == numTarget) {
530 setBlocked(Blocked_NoTargets);
531 // need to be careful with this... if this mshr isn't
532 // ready yet (i.e. time > curTick), we don't want to
533 // move it ahead of mshrs that are ready
534 // mshrQueue.moveToFront(mshr);
538 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
539 // always mark as cache fill for now... if we implement
540 // no-write-allocate or bypass accesses this will have to
542 if (pkt->cmd == MemCmd::Writeback) {
543 allocateWriteBuffer(pkt, time, true);
545 if (blk && blk->isValid()) {
546 // If we have a write miss to a valid block, we
547 // need to mark the block non-readable. Otherwise
548 // if we allow reads while there's an outstanding
549 // write miss, the read could return stale data
550 // out of the cache block... a more aggressive
551 // system could detect the overlap (if any) and
552 // forward data out of the MSHRs, but we don't do
553 // that yet. Note that we do need to leave the
554 // block valid so that it stays in the cache, in
555 // case we get an upgrade response (and hence no
556 // new data) when the write miss completes.
557 // As long as CPUs do proper store/load forwarding
558 // internally, and have a sufficiently weak memory
559 // model, this is probably unnecessary, but at some
560 // point it must have seemed like we needed it...
561 assert(pkt->needsExclusive() && !blk->isWritable());
562 blk->status &= ~BlkReadable;
565 allocateMissBuffer(pkt, time, true);
569 next_pf_time = prefetcher->notify(pkt, time);
// request the memory-side bus for the scheduled prefetch, if any
574 if (next_pf_time != 0)
575 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
577 // copy writebacks to write buffer
578 while (!writebacks.empty()) {
579 PacketPtr wbPkt = writebacks.front();
580 allocateWriteBuffer(wbPkt, time, true);
581 writebacks.pop_front();
588 // See comment in cache.hh.
// Build the memory-side (bus) packet needed to service cpu_pkt.
// Returns NULL when the original packet should simply be forwarded
// (uncacheables, missed writebacks/upgrades); returns an Upgrade
// request when we hold a shared copy but need exclusive; otherwise a
// full-block ReadExReq/ReadReq.
589 template<class TagStore>
591 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
594 bool blkValid = blk && blk->isValid();
596 if (cpu_pkt->req->isUncacheable()) {
597 //assert(blk == NULL);
602 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
603 // Writebacks that weren't allocated in access() and upgrades
604 // from upper-level caches that missed completely just go
609 assert(cpu_pkt->needsResponse());
612 // @TODO make useUpgrades a parameter.
613 // Note that ownership protocols require upgrade, otherwise a
614 // write miss on a shared owned block will generate a ReadExcl,
615 // which will clobber the owned copy.
616 const bool useUpgrades = true;
617 if (blkValid && useUpgrades) {
618 // only reason to be here is that blk is shared
619 // (read-only) and we need exclusive
620 assert(needsExclusive && !blk->isWritable());
// SCUpgradeReq preserves store-conditional failure semantics
621 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
624 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
// bus packet always covers the whole block
626 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
// Handle an atomic-mode access.  Mem-inhibited packets invalidate
// our copy and are forwarded downward (unless we are the last-level
// cache).  Otherwise perform the lookup; on a miss, synchronously
// issue the required bus packet, fill the cache from the response,
// and satisfy the original request.  Writebacks are sent atomically
// at the end.  Returns the accumulated latency.
633 template<class TagStore>
635 Cache<TagStore>::atomicAccess(PacketPtr pkt)
637 int lat = hitLatency;
639 // @TODO: make this a parameter
640 bool last_level_cache = false;
642 if (pkt->memInhibitAsserted()) {
643 assert(!pkt->req->isUncacheable());
644 // have to invalidate ourselves and any lower caches even if
645 // upper cache will be responding
646 if (pkt->isInvalidate()) {
647 BlkType *blk = tags->findBlock(pkt->getAddr());
648 if (blk && blk->isValid()) {
649 tags->invalidateBlk(blk);
650 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
651 pkt->cmdString(), pkt->getAddr());
653 if (!last_level_cache) {
654 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
655 pkt->cmdString(), pkt->getAddr());
656 lat += memSidePort->sendAtomic(pkt);
659 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
660 pkt->cmdString(), pkt->getAddr());
666 // should assert here that there are no outstanding MSHRs or
667 // writebacks... that would mean that someone used an atomic
668 // access in timing mode
671 PacketList writebacks;
// miss path: synchronously fetch the block from below
673 if (!access(pkt, blk, lat, writebacks)) {
675 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
677 bool is_forward = (bus_pkt == NULL);
680 // just forwarding the same request to the next level
681 // no local cache operation involved
685 DPRINTF(Cache, "Sending an atomic %s for %x\n",
686 bus_pkt->cmdString(), bus_pkt->getAddr());
689 CacheBlk::State old_state = blk ? blk->status : 0;
692 lat += memSidePort->sendAtomic(bus_pkt);
694 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
695 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
697 assert(!bus_pkt->wasNacked());
699 // If packet was a forward, the response (if any) is already
700 // in place in the bus_pkt == pkt structure, so we don't need
701 // to do anything. Otherwise, use the separate bus_pkt to
702 // generate response to pkt and then delete it.
704 if (pkt->needsResponse()) {
705 assert(bus_pkt->isResponse());
706 if (bus_pkt->isError()) {
707 pkt->makeAtomicResponse();
708 pkt->copyError(bus_pkt);
709 } else if (bus_pkt->isRead() ||
710 bus_pkt->cmd == MemCmd::UpgradeResp) {
711 // we're updating cache state to allow us to
712 // satisfy the upstream request from the cache
713 blk = handleFill(bus_pkt, blk, writebacks);
714 satisfyCpuSideRequest(pkt, blk);
716 // we're satisfying the upstream request without
717 // modifying cache state, e.g., a write-through
718 pkt->makeAtomicResponse();
725 // Note that we don't invoke the prefetcher at all in atomic mode.
726 // It's not clear how to do it properly, particularly for
727 // prefetchers that aggressively generate prefetch candidates and
728 // rely on bandwidth contention to throttle them; these will tend
729 // to pollute the cache in atomic mode since there is no bandwidth
730 // contention. If we ever do want to enable prefetching in atomic
731 // mode, though, this is the place to do it... see timingAccess()
732 // for an example (though we'd want to issue the prefetch(es)
733 // immediately rather than calling requestMemSideBus() as we do
736 // Handle writebacks if needed
737 while (!writebacks.empty()){
738 PacketPtr wbPkt = writebacks.front();
739 memSidePort->sendAtomic(wbPkt);
740 writebacks.pop_front();
744 // We now have the block one way or another (hit or completed miss)
746 if (pkt->needsResponse()) {
747 pkt->makeAtomicResponse();
// Functional (debug) access: probe, in order, the matching block,
// the incoming port's queues, the MSHRs, the write buffer, and the
// other side's port.  If none of them satisfied the packet, forward
// it out the other side.
754 template<class TagStore>
756 Cache<TagStore>::functionalAccess(PacketPtr pkt,
757 CachePort *incomingPort,
758 CachePort *otherSidePort)
760 Addr blk_addr = blockAlign(pkt->getAddr());
761 BlkType *blk = tags->findBlock(pkt->getAddr());
// label the packet with our name so nested probes can be traced
763 pkt->pushLabel(name());
765 CacheBlkPrintWrapper cbpw(blk);
// short-circuit evaluation stops at the first component that
// satisfies the packet
767 (blk && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data))
768 || incomingPort->checkFunctional(pkt)
769 || mshrQueue.checkFunctional(pkt, blk_addr)
770 || writeBuffer.checkFunctional(pkt, blk_addr)
771 || otherSidePort->checkFunctional(pkt);
773 // We're leaving the cache, so pop cache->name() label
777 otherSidePort->sendFunctional(pkt);
782 /////////////////////////////////////////////////////
784 // Response handling: responses from the memory side
786 /////////////////////////////////////////////////////
// Handle a response arriving from the memory side.  Locates the
// originating MSHR (via senderState), records miss/uncacheable
// latency statistics, performs the cache fill when the response
// carries data, then services every target queued on the MSHR in
// order (CPU requests with critical-word-first timing, prefetches,
// deferred snoops).  Afterwards applies any deferred
// invalidate/downgrade, promotes deferred targets or deallocates the
// MSHR, queues writebacks, and drains the temporary block if it was
// used for the fill.
789 template<class TagStore>
791 Cache<TagStore>::handleResponse(PacketPtr pkt)
793 Tick time = curTick + hitLatency;
794 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
795 bool is_error = pkt->isError();
799 if (pkt->wasNacked()) {
800 //pkt->reinitFromRequest();
801 warn("NACKs from devices not connected to the same bus "
802 "not implemented\n");
806 DPRINTF(Cache, "Cache received packet with error for address %x, "
807 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
810 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
812 MSHRQueue *mq = mshr->queue;
813 bool wasFull = mq->isFull();
815 if (mshr == noTargetMSHR) {
816 // we always clear at least one target
817 clearBlocked(Blocked_NoTargets);
821 // Initial target is used just for stats
822 MSHR::Target *initial_tgt = mshr->getTarget();
823 BlkType *blk = tags->findBlock(pkt->getAddr());
824 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
825 Tick miss_latency = curTick - initial_tgt->recvTime;
826 PacketList writebacks;
828 if (pkt->req->isUncacheable()) {
829 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
832 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
// a fill happens only for non-forwarded reads/upgrade responses
836 bool is_fill = !mshr->isForward &&
837 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
839 if (is_fill && !is_error) {
840 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
843 // give mshr a chance to do some dirty work
844 mshr->handleFill(pkt, blk);
846 blk = handleFill(pkt, blk, writebacks);
850 // First offset for critical word first calculations
851 int initial_offset = 0;
853 if (mshr->hasTargets()) {
854 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
// drain every queued target against the (possibly filled) block
857 while (mshr->hasTargets()) {
858 MSHR::Target *target = mshr->getTarget();
860 switch (target->source) {
861 case MSHR::Target::FromCPU:
862 Tick completion_time;
864 satisfyCpuSideRequest(target->pkt, blk,
865 true, mshr->hasPostDowngrade());
866 // How many bytes past the first request is this one
867 int transfer_offset =
868 target->pkt->getOffset(blkSize) - initial_offset;
869 if (transfer_offset < 0) {
870 transfer_offset += blkSize;
873 // If critical word (no offset) return first word time
874 completion_time = tags->getHitLatency() +
875 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
877 assert(!target->pkt->req->isUncacheable());
878 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
879 completion_time - target->recvTime;
880 } else if (target->pkt->cmd == MemCmd::StoreCondReq &&
881 pkt->cmd == MemCmd::UpgradeFailResp) {
882 // failed StoreCond upgrade
883 completion_time = tags->getHitLatency() + pkt->finishTime;
// extra data 0 signals store-conditional failure to the CPU
884 target->pkt->req->setExtraData(0);
886 // not a cache fill, just forwarding response
887 completion_time = tags->getHitLatency() + pkt->finishTime;
888 if (pkt->isRead() && !is_error) {
889 target->pkt->setData(pkt->getPtr<uint8_t>());
892 target->pkt->makeTimingResponse();
893 // if this packet is an error copy that to the new packet
895 target->pkt->copyError(pkt);
896 if (target->pkt->cmd == MemCmd::ReadResp &&
897 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
898 // If intermediate cache got ReadRespWithInvalidate,
899 // propagate that. Response should not have
900 // isInvalidate() set otherwise.
901 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
903 cpuSidePort->respond(target->pkt, completion_time);
906 case MSHR::Target::FromPrefetcher:
907 assert(target->pkt->cmd == MemCmd::HardPFReq);
909 blk->status |= BlkHWPrefetched;
// prefetch requests are generated internally, so we own
// and must free the request and packet
910 delete target->pkt->req;
914 case MSHR::Target::FromSnoop:
915 // I don't believe that a snoop can be in an error state
917 // response to snoop request
918 DPRINTF(Cache, "processing deferred snoop...\n");
919 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
920 handleSnoop(target->pkt, blk, true, true,
921 mshr->hasPostInvalidate());
925 panic("Illegal target->source enum %d\n", target->source);
// apply coherence actions deferred while the miss was outstanding
932 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
933 tags->invalidateBlk(blk);
934 } else if (mshr->hasPostDowngrade()) {
935 blk->status &= ~BlkWritable;
939 if (mshr->promoteDeferredTargets()) {
940 // avoid later read getting stale data while write miss is
941 // outstanding.. see comment in timingAccess()
943 blk->status &= ~BlkReadable;
945 MSHRQueue *mq = mshr->queue;
946 mq->markPending(mshr);
947 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
949 mq->deallocate(mshr);
950 if (wasFull && !mq->isFull()) {
951 clearBlocked((BlockedCause)mq->index);
955 // copy writebacks to write buffer
956 while (!writebacks.empty()) {
957 PacketPtr wbPkt = writebacks.front();
958 allocateWriteBuffer(wbPkt, time, true);
959 writebacks.pop_front();
961 // if we used temp block, clear it out
962 if (blk == tempBlock) {
963 if (blk->isDirty()) {
964 allocateWriteBuffer(writebackBlk(blk), time, true);
966 tags->invalidateBlk(blk);
// Build a writeback packet for a dirty, valid block: regenerate the
// block address from tag/set, copy out the data, and clear the dirty
// bit.  The caller is responsible for queueing/sending the packet.
975 template<class TagStore>
977 Cache<TagStore>::writebackBlk(BlkType *blk)
979 assert(blk && blk->isValid() && blk->isDirty());
981 writebacks[0/*pkt->req->threadId()*/]++;
983 Request *writebackReq =
984 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
985 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
986 writeback->allocate();
987 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
// the in-cache copy is now clean; the writeback packet owns the
// dirty data
989 blk->status &= ~BlkDirty;
// Select a victim block for addr.  If the victim is the subject of
// an outstanding upgrade MSHR the allocation fails (block not
// inserted); otherwise a dirty victim has a writeback packet
// appended to 'writebacks' and the block is handed back to the
// caller for insertion.
994 template<class TagStore>
995 typename Cache<TagStore>::BlkType*
996 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
998 BlkType *blk = tags->findVictim(addr, writebacks);
1000 if (blk->isValid()) {
1001 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1002 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1004 // must be an outstanding upgrade request on block
1005 // we're about to replace...
1006 assert(!blk->isWritable());
1007 assert(repl_mshr->needsExclusive());
1008 // too hard to replace block with transient state
1009 // allocation failed, block not inserted
1012 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1014 blk->isDirty() ? "writeback" : "clean");
1016 if (blk->isDirty()) {
1017 // Save writeback packet for handling by caller
1018 writebacks.push_back(writebackBlk(blk));
1027 // Note that the reason we return a list of writebacks rather than
1028 // inserting them directly in the write buffer is that this function
1029 // is called by both atomic and timing-mode accesses, and in atomic
1030 // mode we don't mess with the write buffer (we just perform the
1031 // writebacks atomically once the original request is complete).
// handleFill: populate a block with the contents of a response
// packet.  Allocates a victim (falling back to tempBlock when no
// replaceable block exists), then sets valid/readable — plus
// writable when no other cache asserted 'shared', and dirty when an
// owner supplied the data via cache-to-cache transfer.
1032 template<class TagStore>
1033 typename Cache<TagStore>::BlkType*
1034 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1035 PacketList &writebacks)
1037 Addr addr = pkt->getAddr();
1039 CacheBlk::State old_state = blk ? blk->status : 0;
1043 // better have read new data...
1044 assert(pkt->hasData());
1045 // need to do a replacement
1046 blk = allocateBlock(addr, writebacks);
1048 // No replaceable block... just use temporary storage to
1049 // complete the current request and then get rid of it
1050 assert(!tempBlock->isValid());
1052 tempBlock->set = tags->extractSet(addr);
1053 tempBlock->tag = tags->extractTag(addr);
1054 DPRINTF(Cache, "using temp block for %x\n", addr);
1056 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
1057 tags->insertBlock(pkt->getAddr(), blk, id);
1060 // starting from scratch with a new block
1063 // existing block... probably an upgrade
1064 assert(blk->tag == tags->extractTag(addr));
1065 // either we're getting new data or the block should already be valid
1066 assert(pkt->hasData() || blk->isValid());
1067 // don't clear block status... if block is already dirty we
1068 // don't want to lose that
1071 blk->status |= BlkValid | BlkReadable;
1073 if (!pkt->sharedAsserted()) {
1074 blk->status |= BlkWritable;
1075 // If we got this via cache-to-cache transfer (i.e., from a
1076 // cache that was an owner) and took away that owner's copy,
1077 // then we need to write it back. Normally this happens
1078 // anyway as a side effect of getting a copy to write it, but
1079 // there are cases (such as failed store conditionals or
1080 // compare-and-swaps) where we'll demand an exclusive copy but
1081 // end up not writing it.
1082 if (pkt->memInhibitAsserted())
1083 blk->status |= BlkDirty;
1086 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1087 addr, old_state, blk->status);
1089 // if we got new data, copy it in
1090 if (pkt->isRead()) {
1091 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
// block becomes usable once the full transfer has completed
1094 blk->whenReady = pkt->finishTime;
1100 /////////////////////////////////////////////////////
1102 // Snoop path: requests coming in from the memory side
1104 /////////////////////////////////////////////////////
// Supply a timing-mode snoop response from our block data.  A new
// response packet is created unless the caller already made a copy.
// When a deferred invalidation is pending, the response is converted
// to ReadRespWithInvalidate so the original requester invalidates
// immediately after consuming the data.
1106 template<class TagStore>
1109 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1110 bool already_copied, bool pending_inval)
1112 // timing-mode snoop responses require a new packet, unless we
1113 // already made a copy...
1114 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1115 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1117 pkt->makeTimingResponse();
1118 if (pkt->isRead()) {
1119 pkt->setDataFromBlock(blk_data, blkSize);
1121 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1122 // Assume we defer a response to a read from a far-away cache
1123 // A, then later defer a ReadExcl from a cache B on the same
1124 // bus as us. We'll assert MemInhibit in both cases, but in
1125 // the latter case MemInhibit will keep the invalidation from
1126 // reaching cache A. This special response tells cache A that
1127 // it gets the block to satisfy its read, but must immediately
1129 pkt->cmd = MemCmd::ReadRespWithInvalidate;
// response is charged the cache's hit latency
1131 memSidePort->respond(pkt, curTick + hitLatency);
// Core snoop handler shared by the timing and atomic paths: forwards the
// snoop to any upper-level caches, then updates this cache's block state
// and (when the block is dirty) supplies the data to the snooper.
// NOTE(review): the embedded original line numbers jump in places (e.g.
// 1165->1167, 1182->1185, 1211->1214), so some statements/braces of the
// original are missing from this excerpt; comments below describe only
// what is visible.
1134 template<class TagStore>
1136 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1137 bool is_timing, bool is_deferred,
1140 // deferred snoops can only happen in timing mode
1141 assert(!(is_deferred && !is_timing));
1142 // pending_inval only makes sense on deferred snoops
1143 assert(!(pending_inval && !is_deferred));
1144 assert(pkt->isRequest());
1146 // the packet may get modified if we or a forwarded snooper
1147 // responds in atomic mode, so remember a few things about the
1148 // original packet up front
1149 bool invalidate = pkt->isInvalidate();
1150 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1152 if (forwardSnoops) {
1153 // first propagate snoop upward to see if anyone above us wants to
1154 // handle it. save & restore packet src since it will get
1155 // rewritten to be relative to cpu-side bus (if any)
1156 bool alreadyResponded = pkt->memInhibitAsserted();
// Timing path: forward a flag-cleared copy as an express snoop so the
// upper cache responds out-of-band; the ForwardResponseRecord lets the
// response be routed back through us to the original requester.
1158 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1159 snoopPkt->setExpressSnoop();
1160 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1161 cpuSidePort->sendTiming(snoopPkt);
1162 if (snoopPkt->memInhibitAsserted()) {
1163 // cache-to-cache response from some upper cache
1164 assert(!alreadyResponded);
1165 pkt->assertMemInhibit();
1167 delete snoopPkt->senderState;
// Mirror the shared-line result from the snoop copy onto the original
// packet so the requester sees it.
1169 if (snoopPkt->sharedAsserted()) {
1170 pkt->assertShared();
// Atomic path (presumably the else-branch of the timing check above —
// the branch line itself is absent from this excerpt): snoop upward
// synchronously and restore the src the upper bus rewrote.
1174 int origSrc = pkt->getSrc();
1175 cpuSidePort->sendAtomic(pkt);
1176 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1177 // cache-to-cache response from some upper cache:
1178 // forward response to original requester
1179 assert(pkt->isResponse());
1181 pkt->setSrc(origSrc);
// Nothing to do locally if we don't have a valid copy of the block
// (the early-exit statement for this guard is absent from this excerpt).
1185 if (!blk || !blk->isValid()) {
1189 // we may end up modifying both the block state and the packet (if
1190 // we respond in atomic mode), so just figure out what to do now
1191 // and then do it later
1192 bool respond = blk->isDirty() && pkt->needsResponse();
1193 bool have_exclusive = blk->isWritable();
// A non-invalidating read leaves us with a shared copy: assert the
// shared line and drop writability (and, if pure ownership were not
// supported, the dirty bit as well).
1195 if (pkt->isRead() && !invalidate) {
1196 assert(!needs_exclusive);
1197 pkt->assertShared();
1198 int bits_to_clear = BlkWritable;
1199 const bool haveOwnershipState = true; // for now
1200 if (!haveOwnershipState) {
1201 // if we don't support pure ownership (dirty && !writable),
1202 // have to clear dirty bit here, assume memory snarfs data
1203 // on cache-to-cache xfer
1204 bits_to_clear |= BlkDirty;
1206 blk->status &= ~bits_to_clear;
1209 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1210 pkt->cmdString(), blockAlign(pkt->getAddr()),
1211 respond ? "responding, " : "", invalidate ? 0 : blk->status);
// Supply the data ourselves — presumably guarded by `if (respond)`;
// that guard line is absent from this excerpt.
1214 assert(!pkt->memInhibitAsserted());
1215 pkt->assertMemInhibit();
1216 if (have_exclusive) {
1217 pkt->setSupplyExclusive();
// Timing mode responds via a separate packet; atomic mode converts the
// request packet in place and copies the block data into it.
1220 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1222 pkt->makeAtomicResponse();
1223 pkt->setDataFromBlock(blk->data, blkSize);
1225 } else if (is_timing && is_deferred) {
1226 // if it's a deferred timing snoop then we've made a copy of
1227 // the packet, and so if we're not using that copy to respond
1228 // then we need to delete it here.
1232 // Do this last in case it deallocates block data or something
// Invalidation is deferred to the very end (presumably guarded by the
// `invalidate` flag — the guard line is absent from this excerpt).
1235 tags->invalidateBlk(blk);
// Timing-mode snoop entry point: filters uncacheable/writeback snoops,
// defers snoops that hit an in-service MSHR, services snoops that hit
// the writeback buffer directly from the buffered data, and otherwise
// falls through to the common handleSnoop() path.
// NOTE(review): gaps in the embedded original line numbers (e.g.
// 1292->1295) indicate some statements are missing from this excerpt.
1240 template<class TagStore>
1242 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1244 // Note that some deferred snoops don't have requests, since the
1245 // original access may have already completed
1246 if ((pkt->req && pkt->req->isUncacheable()) ||
1247 pkt->cmd == MemCmd::Writeback) {
1248 //Can't get a hit on an uncacheable address
1249 //Revisit this for multi level coherence
1253 BlkType *blk = tags->findBlock(pkt->getAddr());
1255 Addr blk_addr = blockAlign(pkt->getAddr());
1256 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1258 // Let the MSHR itself track the snoop and decide whether we want
1259 // to go ahead and do the regular cache snoop
1260 if (mshr && mshr->handleSnoop(pkt, order++)) {
1261 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
// Deferred snoops consume an extra target slot beyond the configured
// numTarget limit; flagged but tolerated for now.
1263 if (mshr->getNumTargets() > numTarget)
1264 warn("allocating bonus target for snoop"); //handle later
1268 //We also need to check the writeback buffers and handle those
1269 std::vector<MSHR *> writebacks;
1270 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1271 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1274 //Look through writebacks for any non-uncachable writes, use that
1275 for (int i = 0; i < writebacks.size(); i++) {
1276 mshr = writebacks[i];
1277 assert(!mshr->isUncacheable());
1278 assert(mshr->getNumTargets() == 1);
1279 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1280 assert(wb_pkt->cmd == MemCmd::Writeback);
// We own the (dirty) data sitting in the writeback buffer, so we must
// respond to the snoop from it.
1282 assert(!pkt->memInhibitAsserted());
1283 pkt->assertMemInhibit();
1284 if (!pkt->needsExclusive()) {
1285 pkt->assertShared();
1287 // if we're not asserting the shared line, we need to
1288 // invalidate our copy. we'll do that below as long as
1289 // the packet's invalidate flag is set...
1290 assert(pkt->isInvalidate());
1292 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1295 if (pkt->isInvalidate()) {
1296 // Invalidation trumps our writeback... discard here
1297 markInService(mshr);
1301 // If this was a shared writeback, there may still be
1302 // other shared copies above that require invalidation.
1303 // We could be more selective and return here if the
1304 // request is non-exclusive or if the writeback is
// Fall through to the normal snoop path: timing mode, not deferred,
// no pending invalidation.
1310 handleSnoop(pkt, blk, true, false, false);
// Atomic-mode snoop entry point: skip uncacheable addresses and
// writebacks, otherwise run the common snoop handler with
// is_timing = is_deferred = pending_inval = false.
1314 template<class TagStore>
1316 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1318 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1319 // Can't get a hit on an uncacheable address
1320 // Revisit this for multi level coherence
// (the early-return for the guard above is absent from this excerpt)
1324 BlkType *blk = tags->findBlock(pkt->getAddr());
1325 handleSnoop(pkt, blk, false, false, false);
// Arbitrate between the miss (MSHR) queue and the write buffer for the
// next request to issue; if neither has anything pending, try to issue
// a hardware prefetch. Misses are favored unless the write buffer is
// full, and ordering conflicts between the two queues are respected.
// NOTE(review): several return statements implied by the comments below
// are absent from this excerpt (gaps in embedded line numbers).
1330 template<class TagStore>
1332 Cache<TagStore>::getNextMSHR()
1334 // Check both MSHR queue and write buffer for potential requests
1335 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1336 MSHR *write_mshr = writeBuffer.getNextMSHR();
1338 // Now figure out which one to send... some cases are easy
1339 if (miss_mshr && !write_mshr) {
1342 if (write_mshr && !miss_mshr) {
1346 if (miss_mshr && write_mshr) {
1347 // We have one of each... normally we favor the miss request
1348 // unless the write buffer is full
1349 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1350 // Write buffer is full, so we'd like to issue a write;
1351 // need to search MSHR queue for conflicting earlier miss.
1352 MSHR *conflict_mshr =
1353 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1355 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1356 // Service misses in order until conflict is cleared.
1357 return conflict_mshr;
1360 // No conflicts; issue write
1364 // Write buffer isn't full, but need to check it for
1365 // conflicting earlier writeback
1366 MSHR *conflict_mshr =
1367 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1368 if (conflict_mshr) {
1369 // not sure why we don't check order here... it was in the
1370 // original code but commented out.
1372 // The only way this happens is if we are
1373 // doing a write and we didn't have permissions
1374 // then subsequently saw a writeback (owned got evicted)
1375 // We need to make sure to perform the writeback first
1376 // To preserve the dirty data, then we can issue the write
1378 // should we return write_mshr here instead? I.e. do we
1379 // have to flush writes in order? I don't think so... not
1380 // for Alpha anyway. Maybe for x86?
1381 return conflict_mshr;
1384 // No conflicts; issue read
1388 // fall through... no pending requests. Try a prefetch.
1389 assert(!miss_mshr && !write_mshr);
1390 if (prefetcher && !mshrQueue.isFull()) {
1391 // If we have a miss queue slot, we can try a prefetch
1392 PacketPtr pkt = prefetcher->getPacket();
// Only issue the prefetch if the line is neither already cached nor
// already being fetched by an outstanding MSHR.
1394 Addr pf_addr = blockAlign(pkt->getAddr());
1395 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
1396 // Update statistic on number of prefetches issued
1397 // (hwpf_mshr_misses)
1398 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1399 // Don't request bus, since we already have it
1400 return allocateMissBuffer(pkt, curTick, false);
// Build the next packet to send on the memory-side port from the next
// ready MSHR: failed SC upgrades are turned into immediate failure
// responses, forward-no-response requests are passed through, and
// everything else goes through getBusPacket(); the MSHR rides along in
// pkt->senderState so the response can find it.
// NOTE(review): some branch/return lines are absent from this excerpt
// (gaps in the embedded line numbers, e.g. 1413->1419, 1435->1438).
1409 template<class TagStore>
1411 Cache<TagStore>::getTimingPacket()
1413 MSHR *mshr = getNextMSHR();
1419 // use request from 1st target
1420 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1421 PacketPtr pkt = NULL;
1423 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq) {
1424 // SCUpgradeReq saw invalidation while queued in MSHR, so now
1425 // that we are getting around to processing it, just treat it
1426 // as if we got a failure response
1427 pkt = new Packet(tgt_pkt);
1428 pkt->cmd = MemCmd::UpgradeFailResp;
1429 pkt->senderState = mshr;
// Synthesize timing: both words "arrive" right now, then feed the
// packet straight back into our own response path.
1430 pkt->firstWordTime = pkt->finishTime = curTick;
1431 handleResponse(pkt);
1433 } else if (mshr->isForwardNoResponse()) {
1434 // no response expected, just forward packet as it is
1435 assert(tags->findBlock(mshr->addr) == NULL);
1438 BlkType *blk = tags->findBlock(mshr->addr);
1439 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
// getBusPacket() returning NULL means this isn't a cache-block fill;
// remember that so the response is forwarded rather than filled.
1441 mshr->isForward = (pkt == NULL);
1443 if (mshr->isForward) {
1444 // not a cache block request, but a response is expected
1445 // make copy of current packet to forward, keep current
1446 // copy for response handling
1447 pkt = new Packet(tgt_pkt);
1449 if (pkt->isWrite()) {
1450 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1455 assert(pkt != NULL);
1456 pkt->senderState = mshr;
// Earliest tick at which any queued work (MSHR queue, write buffer,
// or — when a prefetcher is present — the next prefetch) becomes ready.
// NOTE(review): the prefetcher guard and the final return are absent
// from this excerpt (gaps in embedded line numbers).
1461 template<class TagStore>
1463 Cache<TagStore>::nextMSHRReadyTime()
1465 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1466 writeBuffer.nextMSHRReadyTime());
1469 nextReady = std::min(nextReady,
1470 prefetcher->nextPrefetchReadyTime());
// Report the address ranges served by the CPU-side port: the cache's
// whole range, and (per the comment) no snooping on this side.
1483 template<class TagStore>
1485 Cache<TagStore>::CpuSidePort::
1486 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1488 // CPU side port doesn't snoop; it's a target only. It can
1489 // potentially respond to any address.
// (the `snoop = false` assignment implied above is absent from this
// excerpt)
1491 resp.push_back(myCache()->getAddrRange());
// Timing request from the CPU side. If we're blocked, schedule a retry
// for plain requests — but responses (and requests with MemInhibit
// already asserted) must never be blocked, as that can deadlock.
1495 template<class TagStore>
1497 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1499 // illegal to block responses... can lead to deadlock
1500 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1501 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1502 mustSendRetry = true;
// (the early `return false;` for the blocked case is absent from this
// excerpt) — otherwise hand the packet to the cache's timing path.
1506 myCache()->timingAccess(pkt);
// Atomic access from the CPU side: delegate to the cache and return
// its latency.
1511 template<class TagStore>
1513 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1515 return myCache()->atomicAccess(pkt);
// Functional (debug) access from the CPU side: delegate to the cache,
// passing this port and the opposite port for forwarding.
1519 template<class TagStore>
1521 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1523 myCache()->functionalAccess(pkt, this, otherPort);
// CPU-side port constructor: just forwards name/cache/label to the
// BaseCache::CachePort base. (The — presumably empty — body braces are
// absent from this excerpt.)
1527 template<class TagStore>
1529 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1530 const std::string &_label)
1531 : BaseCache::CachePort(_name, _cache, _label)
// Report address ranges for the memory-side port: it snoops but serves
// no ranges of its own, so the list is left untouched.
1541 template<class TagStore>
1543 Cache<TagStore>::MemSidePort::
1544 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1546 // Memory-side port always snoops, but never passes requests
1547 // through to targets on the cpu side (so we don't add anything to
1548 // the address range list).
// (the `snoop = true` assignment implied above is absent from this
// excerpt)
// Timing packet from the memory side: nacked packets are unsupported
// (panic), blocked requests get a retry scheduled, responses go to
// handleResponse(), and everything else is treated as a snoop.
1553 template<class TagStore>
1555 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1557 // this needs to be fixed so that the cache updates the mshr and sends the
1558 // packet back out on the link, but it probably won't happen so until this
1559 // gets fixed, just panic when it does
1560 if (pkt->wasNacked())
1561 panic("Need to implement cache resending nacked packets!\n");
1563 if (pkt->isRequest() && blocked) {
1564 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1565 mustSendRetry = true;
// (the early `return false;` for the blocked case is absent from this
// excerpt)
1569 if (pkt->isResponse()) {
1570 myCache()->handleResponse(pkt);
// Non-response, non-blocked: a snoop from below (the `else` line is
// absent from this excerpt).
1572 myCache()->snoopTiming(pkt);
// Atomic packet from the memory side: must be a snoop request (atomic
// responses travel back via the sendAtomic() return value, never as a
// separate packet), so delegate to snoopAtomic().
1578 template<class TagStore>
1580 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1582 // in atomic mode, responses go back to the sender via the
1583 // function return from sendAtomic(), not via a separate
1584 // sendAtomic() from the responder. Thus we should never see a
1585 // response packet in recvAtomic() (anywhere, not just here).
1586 assert(!pkt->isResponse());
1587 return myCache()->snoopAtomic(pkt);
// Functional (debug) access from the memory side: delegate to the
// cache, passing this port and the opposite port for forwarding.
1591 template<class TagStore>
1593 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1595 myCache()->functionalAccess(pkt, this, otherPort);
// Try to send the next packet on the memory-side port. Queued (deferred)
// responses take precedence over new requests/writebacks from the
// cache's MSHRs; after the attempt, reschedule the send event for the
// next ready time, or signal drain completion if nothing is left.
// NOTE(review): several branch/brace lines are absent from this excerpt
// (gaps in the embedded line numbers, e.g. 1606->1609, 1616->1618).
1600 template<class TagStore>
1602 Cache<TagStore>::MemSidePort::sendPacket()
1604 // if we have responses that are ready, they take precedence
1605 if (deferredPacketReady()) {
1606 bool success = sendTiming(transmitList.front().pkt);
1609 //send successful, remove packet
1610 transmitList.pop_front();
// A failed sendTiming() leaves us waiting for a bus retry.
1613 waitingOnRetry = !success;
1615 // check for non-response packets (requests & writebacks)
1616 PacketPtr pkt = myCache()->getTimingPacket();
// getTimingPacket() returning NULL is legitimate:
1618 // can happen if e.g. we attempt a writeback and fail, but
1619 // before the retry, the writeback is eliminated because
1620 // we snoop another cache's ReadEx.
1621 waitingOnRetry = false;
// The MSHR was stashed in senderState by getTimingPacket().
1623 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1625 bool success = sendTiming(pkt);
1627 waitingOnRetry = !success;
1628 if (waitingOnRetry) {
1629 DPRINTF(CachePort, "now waiting on a retry\n");
1630 if (!mshr->isForwardNoResponse()) {
// Send succeeded (or forward-no-response): mark the MSHR in service —
// presumably the else-path of the retry check; those branch lines are
// absent from this excerpt.
1634 myCache()->markInService(mshr, pkt);
1640 // tried to send packet... if it was successful (no retry), see if
1641 // we need to rerequest bus or not
1642 if (!waitingOnRetry) {
1643 Tick nextReady = std::min(deferredPacketReadyTime(),
1644 myCache()->nextMSHRReadyTime());
1645 // @TODO: need to factor in prefetch requests here somehow
1646 if (nextReady != MaxTick) {
1647 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
// Never schedule in the past/present: at least curTick + 1.
1648 schedule(sendEvent, std::max(nextReady, curTick + 1));
1650 // no more to send right now: if we're draining, we may be done
1651 if (drainEvent && !sendEvent->scheduled()) {
1652 drainEvent->process();
// Bus retry received: we must have been waiting for one. (The resend —
// presumably a sendPacket() call — is absent from this excerpt.)
1659 template<class TagStore>
1661 Cache<TagStore>::MemSidePort::recvRetry()
1663 assert(waitingOnRetry);
// Scheduled send event fired: must not be mid-retry. (The actual send —
// presumably a sendPacket() call — is absent from this excerpt.)
1668 template<class TagStore>
1670 Cache<TagStore>::MemSidePort::processSendEvent()
1672 assert(!waitingOnRetry);
1677 template<class TagStore>
1679 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1680 const std::string &_label)
1681 : BaseCache::CachePort(_name, _cache, _label)
1683 // override default send event from SimpleTimingPort
1685 sendEvent = new SendEvent(this);