/*
 * Copyright (c) 2010 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2010 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */
#include "base/fast_alloc.hh"
#include "base/misc.hh"
#include "base/range.hh"
#include "base/types.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/cache.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/prefetch/base.hh"
#include "sim/sim_exit.hh"
template<class TagStore>
Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
    : BaseCache(p),
      tags(tags),
      prefetcher(pf),
      doFastWrites(true),
      prefetchOnAccess(p->prefetch_on_access)
{
    tempBlock = new BlkType();
    tempBlock->data = new uint8_t[blkSize];

    cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
                                  "CpuSidePort");
    memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
                                  "MemSidePort");
    cpuSidePort->setOtherPort(memSidePort);
    memSidePort->setOtherPort(cpuSidePort);

    tags->setCache(this);
    if (prefetcher)
        prefetcher->setCache(this);
}
template<class TagStore>
void
Cache<TagStore>::regStats()
{
    BaseCache::regStats();
    tags->regStats(name());
    if (prefetcher)
        prefetcher->regStats(name());
}
template<class TagStore>
Port *
Cache<TagStore>::getPort(const std::string &if_name, int idx)
{
    if (if_name == "" || if_name == "cpu_side") {
        return cpuSidePort;
    } else if (if_name == "mem_side") {
        return memSidePort;
    } else if (if_name == "functional") {
        CpuSidePort *funcPort =
            new CpuSidePort(name() + "-cpu_side_funcport", this,
                            "CpuSideFuncPort");
        funcPort->setOtherPort(memSidePort);
        return funcPort;
    } else {
        panic("Port name %s unrecognized\n", if_name);
    }
}
template<class TagStore>
void
Cache<TagStore>::deletePortRefs(Port *p)
{
    if (cpuSidePort == p || memSidePort == p)
        panic("Can only delete functional ports\n");

    delete p;
}
template<class TagStore>
void
Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
{
    uint64_t overwrite_val;
    bool overwrite_mem;
    uint64_t condition_val64;
    uint32_t condition_val32;

    int offset = tags->extractBlkOffset(pkt->getAddr());
    uint8_t *blk_data = blk->data + offset;

    assert(sizeof(uint64_t) >= pkt->getSize());

    overwrite_mem = true;
    // keep a copy of our possible write value, and copy what is at the
    // memory address into the packet
    pkt->writeData((uint8_t *)&overwrite_val);
    pkt->setData(blk_data);

    if (pkt->req->isCondSwap()) {
        if (pkt->getSize() == sizeof(uint64_t)) {
            condition_val64 = pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val64, blk_data,
                                         sizeof(uint64_t));
        } else if (pkt->getSize() == sizeof(uint32_t)) {
            condition_val32 = (uint32_t)pkt->req->getExtraData();
            overwrite_mem = !std::memcmp(&condition_val32, blk_data,
                                         sizeof(uint32_t));
        } else
            panic("Invalid size for conditional read/write\n");
    }

    if (overwrite_mem) {
        std::memcpy(blk_data, &overwrite_val, pkt->getSize());
        blk->status |= BlkDirty;
    }
}
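
// Illustrative sketch (not part of the original source): the effective
// semantics of the conditional-swap path above, for a 64-bit access.
// 'mem' stands for the block data at the packet's offset:
//
//     uint64_t mem = <current block contents at offset>;
//     uint64_t new_val = <value carried by the packet>;
//     uint64_t cond = pkt->req->getExtraData();
//     pkt->setData((uint8_t *)&mem);           // old value always returned
//     if (std::memcmp(&cond, &mem, 8) == 0) {  // swap only on a match...
//         mem = new_val;
//         blk->status |= BlkDirty;             // ...and mark the block dirty
//     }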
template<class TagStore>
void
Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
                                       bool deferred_response,
                                       bool pending_downgrade)
{
    assert(blk && blk->isValid());
    // Occasionally this is not true... if we are a lower-level cache
    // satisfying a string of Read and ReadEx requests from
    // upper-level caches, a Read will mark the block as shared but we
    // can satisfy a following ReadEx anyway since we can rely on the
    // Read requester(s) to have buffered the ReadEx snoop and to
    // invalidate their blocks after receiving them.
    // assert(!pkt->needsExclusive() || blk->isWritable());
    assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);

    // Check RMW operations first since both isRead() and
    // isWrite() will be true for them
    if (pkt->cmd == MemCmd::SwapReq) {
        cmpAndSwap(blk, pkt);
    } else if (pkt->isWrite()) {
        if (blk->checkWrite(pkt)) {
            pkt->writeDataToBlock(blk->data, blkSize);
            blk->status |= BlkDirty;
        }
    } else if (pkt->isRead()) {
        if (pkt->isLLSC()) {
            blk->trackLoadLocked(pkt);
        }
        pkt->setDataFromBlock(blk->data, blkSize);
        if (pkt->getSize() == blkSize) {
            // special handling for coherent block requests from
            // upper-level caches
            if (pkt->needsExclusive()) {
                // if we have a dirty copy, make sure the recipient
                // keeps it marked dirty
                if (blk->isDirty()) {
                    pkt->assertMemInhibit();
                }
                // on ReadExReq we give up our copy unconditionally
                tags->invalidateBlk(blk);
            } else if (blk->isWritable() && !pending_downgrade
                       && !pkt->sharedAsserted()) {
                // we can give the requester an exclusive copy (by not
                // asserting shared line) on a read request if:
                // - we have an exclusive copy at this level (& below)
                // - we don't have a pending snoop from below
                //   signaling another read request
                // - no other cache above has a copy (otherwise it
                //   would have asserted shared line on request)

                if (blk->isDirty()) {
                    // special considerations if we're owner:
                    if (!deferred_response) {
                        // if we are responding immediately and can
                        // signal that we're transferring ownership
                        // along with exclusivity, do so
                        pkt->assertMemInhibit();
                        blk->status &= ~BlkDirty;
                    } else {
                        // if we're responding after our own miss,
                        // there's a window where the recipient didn't
                        // know it was getting ownership and may not
                        // have responded to snoops correctly, so we
                        // can't pass off ownership *or* exclusivity
                        pkt->assertShared();
                    }
                }
            } else {
                // otherwise only respond with a shared copy
                pkt->assertShared();
            }
        }
    } else {
        // Not a read or write... must be an upgrade. It's OK
        // to just ack those as long as we have an exclusive
        // copy at this level.
        assert(pkt->isUpgrade());
        tags->invalidateBlk(blk);
    }
}
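
// Summary of the full-block read cases handled above (a sketch, not
// exhaustive; "inhibit" means assertMemInhibit(), i.e. we supply the
// data instead of memory):
//
//   block state         deferred?   response
//   ------------------  ----------  ----------------------------------
//   needs exclusive     either      inhibit if dirty, then invalidate
//   dirty + writable    no          inhibit, pass ownership (clr dirty)
//   dirty + writable    yes         respond shared, stay owner
//   clean + writable    either      respond exclusive (no shared line)
//   shared only         either      respond with shared line asserted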
/////////////////////////////////////////////////////
//
// MSHR helper functions
//
/////////////////////////////////////////////////////
template<class TagStore>
void
Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
{
    markInServiceInternal(mshr, pkt);

    if (mshr->originalCmd == MemCmd::HardPFReq) {
        DPRINTF(HWPrefetch, "%s: Marking a HW_PF in service\n",
                name());
        // Also clear pending if need be
        if (!prefetcher->havePending()) {
            deassertMemSideBusRequest(Request_PF);
        }
    }
}
template<class TagStore>
void
Cache<TagStore>::squash(int threadNum)
{
    bool unblock = false;
    BlockedCause cause = NUM_BLOCKED_CAUSES;

    if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
        noTargetMSHR = NULL;
        unblock = true;
        cause = Blocked_NoTargets;
    }
    if (mshrQueue.isFull()) {
        unblock = true;
        cause = Blocked_NoMSHRs;
    }
    mshrQueue.squash(threadNum);
    if (unblock && !mshrQueue.isFull()) {
        clearBlocked(cause);
    }
}
/////////////////////////////////////////////////////
//
// Access path: requests coming in from the CPU side
//
/////////////////////////////////////////////////////
template<class TagStore>
bool
Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
                        int &lat, PacketList &writebacks)
{
    if (pkt->req->isUncacheable()) {
        if (pkt->req->isClrex()) {
            tags->clearLocks();
        } else {
            blk = tags->findBlock(pkt->getAddr());
            if (blk != NULL) {
                tags->invalidateBlk(blk);
            }
        }

        blk = NULL;
        lat = hitLatency;
        return false;
    }

    int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
    blk = tags->accessBlock(pkt->getAddr(), lat, id);

    DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
            pkt->req->isInstFetch() ? " (ifetch)" : "",
            pkt->getAddr(), (blk) ? "hit" : "miss");

    if (blk != NULL) {
        if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
            // OK to satisfy access
            incHitCount(pkt, id);
            satisfyCpuSideRequest(pkt, blk);
            return true;
        }
    }

    // Can't satisfy access normally... either no block (blk == NULL)
    // or have block but need exclusive & only have shared.

    // Writeback handling is a special case. We can write the block
    // into the cache without having a writeable copy (or any copy at
    // all).
    if (pkt->cmd == MemCmd::Writeback) {
        assert(blkSize == pkt->getSize());
        if (blk == NULL) {
            // need to do a replacement
            blk = allocateBlock(pkt->getAddr(), writebacks);
            if (blk == NULL) {
                // no replaceable block available, give up.
                // writeback will be forwarded to next level.
                incMissCount(pkt, id);
                return false;
            }
            int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
            tags->insertBlock(pkt->getAddr(), blk, id);
            blk->status = BlkValid | BlkReadable;
        }
        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
        blk->status |= BlkDirty;
        if (pkt->isSupplyExclusive()) {
            blk->status |= BlkWritable;
        }
        // nothing else to do; writeback doesn't expect response
        assert(!pkt->needsResponse());
        incHitCount(pkt, id);
        return true;
    }

    incMissCount(pkt, id);

    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
        // complete miss on store conditional... just give up now
        pkt->req->setExtraData(0);
        return true;
    }

    return false;
}
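
// Illustrative note on the store-conditional path above (assumes
// LL/SC semantics as on e.g. Alpha or ARM): a StoreCond that misses
// completely cannot hold a valid lock on the line, so it fails without
// going to memory. The CPU observes the failure through the request's
// extra data:
//
//     if (pkt->req->extraDataValid() && pkt->req->getExtraData() == 0) {
//         // store conditional failed
//     }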
class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
{
    Packet::SenderState *prevSenderState;
    int prevSrc;
#ifndef NDEBUG
    BaseCache *cache;
#endif
  public:
    ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
        : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
#ifndef NDEBUG
          , cache(_cache)
#endif
    {}
    void restore(Packet *pkt, BaseCache *_cache)
    {
        assert(_cache == cache);
        pkt->senderState = prevSenderState;
        pkt->setDest(prevSrc);
    }
};
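
// Usage sketch, matching how this record is used further down in this
// file: handleSnoop() attaches one before forwarding a snoop upward,
// and timingAccess() detaches it when the response comes back down:
//
//     snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
//     cpuSidePort->sendTiming(snoopPkt);
//     ...
//     rec->restore(pkt, this);   // put senderState/dest back
//     delete rec;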
template<class TagStore>
bool
Cache<TagStore>::timingAccess(PacketPtr pkt)
{
//@todo Add back in MemDebug Calls
//    MemDebug::cacheAccess(pkt);

    // we charge hitLatency for doing just about anything here
    Tick time = curTick + hitLatency;

    if (pkt->isResponse()) {
        // must be cache-to-cache response from upper to lower level
        ForwardResponseRecord *rec =
            dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
        assert(rec != NULL);
        rec->restore(pkt, this);
        delete rec;
        memSidePort->respond(pkt, time);
        return true;
    }

    assert(pkt->isRequest());

    if (pkt->memInhibitAsserted()) {
        DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
                pkt->getAddr());
        assert(!pkt->req->isUncacheable());
        // Special tweak for multilevel coherence: snoop downward here
        // on invalidates since there may be other caches below here
        // that have shared copies. Not necessary if we know that
        // supplier had exclusive copy to begin with.
        if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
            Packet *snoopPkt = new Packet(pkt, true);  // clear flags
            snoopPkt->setExpressSnoop();
            snoopPkt->assertMemInhibit();
            memSidePort->sendTiming(snoopPkt);
            // main memory will delete snoopPkt
        }
        // since we're the official target but we aren't responding,
        // delete the packet now.
        delete pkt;
        return true;
    }

    if (pkt->req->isUncacheable()) {
        if (pkt->req->isClrex()) {
            tags->clearLocks();
        } else {
            BlkType *blk = tags->findBlock(pkt->getAddr());
            if (blk != NULL) {
                tags->invalidateBlk(blk);
            }
        }

        // writes go in write buffer, reads use MSHR
        if (pkt->isWrite() && !pkt->isRead()) {
            allocateWriteBuffer(pkt, time, true);
        } else {
            allocateUncachedReadBuffer(pkt, time, true);
        }
        assert(pkt->needsResponse()); // else we should delete it here??
        return true;
    }

    int lat = hitLatency;
    BlkType *blk = NULL;
    PacketList writebacks;

    bool satisfied = access(pkt, blk, lat, writebacks);

#if 0
    /** @todo make the fast write alloc (wh64) work with coherence. */

    // If this is a block size write/hint (WH64) allocate the block here
    // if the coherence protocol allows it.
    if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
        (pkt->cmd == MemCmd::WriteReq
         || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
        // no outstanding misses, can do this
        MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
        if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
            if (outstanding_miss) {
                warn("WriteInv doing a fast allocate "
                     "with an outstanding miss to the same address\n");
            }
            blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
                             writebacks);
        }
    }
#endif

    // track time of availability of next prefetch, if any
    Tick next_pf_time = 0;

    bool needsResponse = pkt->needsResponse();

    if (satisfied) {
        if (needsResponse) {
            pkt->makeTimingResponse();
            cpuSidePort->respond(pkt, curTick+lat);
        } else {
            delete pkt;
        }

        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
            if (blk)
                blk->status &= ~BlkHWPrefetched;
            next_pf_time = prefetcher->notify(pkt, time);
        }
    } else {
        // miss
        Addr blk_addr = blockAlign(pkt->getAddr());
        MSHR *mshr = mshrQueue.findMatch(blk_addr);

        if (mshr) {
            // MSHR hit
            //@todo remove hw_pf here
            mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
            if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
                mshr->threadNum = -1;
            }
            mshr->allocateTarget(pkt, time, order++);
            if (mshr->getNumTargets() == numTarget) {
                noTargetMSHR = mshr;
                setBlocked(Blocked_NoTargets);
                // need to be careful with this... if this mshr isn't
                // ready yet (i.e. time > curTick), we don't want to
                // move it ahead of mshrs that are ready
                // mshrQueue.moveToFront(mshr);
            }
        } else {
            // no MSHR
            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
            // always mark as cache fill for now... if we implement
            // no-write-allocate or bypass accesses this will have to
            // be changed.
            if (pkt->cmd == MemCmd::Writeback) {
                allocateWriteBuffer(pkt, time, true);
            } else {
                if (blk && blk->isValid()) {
                    // If we have a write miss to a valid block, we
                    // need to mark the block non-readable. Otherwise
                    // if we allow reads while there's an outstanding
                    // write miss, the read could return stale data
                    // out of the cache block... a more aggressive
                    // system could detect the overlap (if any) and
                    // forward data out of the MSHRs, but we don't do
                    // that yet. Note that we do need to leave the
                    // block valid so that it stays in the cache, in
                    // case we get an upgrade response (and hence no
                    // new data) when the write miss completes.
                    // As long as CPUs do proper store/load forwarding
                    // internally, and have a sufficiently weak memory
                    // model, this is probably unnecessary, but at some
                    // point it must have seemed like we needed it...
                    assert(pkt->needsExclusive() && !blk->isWritable());
                    blk->status &= ~BlkReadable;
                }

                allocateMissBuffer(pkt, time, true);
            }

            if (prefetcher) {
                next_pf_time = prefetcher->notify(pkt, time);
            }
        }
    }

    if (next_pf_time != 0)
        requestMemSideBus(Request_PF, std::max(time, next_pf_time));

    // copy writebacks to write buffer
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        allocateWriteBuffer(wbPkt, time, true);
        writebacks.pop_front();
    }

    return true;
}
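
// Timeline sketch (illustrative) of the BlkReadable interlock used in
// the write-miss case above:
//
//   t0: WriteReq misses (we only have a shared copy); an MSHR is
//       allocated and blk->status &= ~BlkReadable makes reads miss too
//   t1: a ReadReq to the same block misses (block not readable) and is
//       queued as a target on the same MSHR
//   t2: the upgrade/fill response arrives; the fill restores
//       BlkReadable and the queued read is satisfied with fresh data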
// See comment in cache.hh.
template<class TagStore>
PacketPtr
Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
                              bool needsExclusive)
{
    bool blkValid = blk && blk->isValid();

    if (cpu_pkt->req->isUncacheable()) {
        //assert(blk == NULL);
        return NULL;
    }

    if (!blkValid &&
        (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
        // Writebacks that weren't allocated in access() and upgrades
        // from upper-level caches that missed completely just go
        // through.
        return NULL;
    }

    assert(cpu_pkt->needsResponse());

    MemCmd cmd;
    // @TODO make useUpgrades a parameter.
    // Note that ownership protocols require upgrade, otherwise a
    // write miss on a shared owned block will generate a ReadExcl,
    // which will clobber the owned copy.
    const bool useUpgrades = true;
    if (blkValid && useUpgrades) {
        // only reason to be here is that blk is shared
        // (read-only) and we need exclusive
        assert(needsExclusive && !blk->isWritable());
        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    } else {
        // block is invalid
        cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
    }
    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);

    pkt->allocate();
    return pkt;
}
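
// Bus command selection above, summarized (a sketch):
//
//   local copy?     need exclusive?   command sent downstream
//   --------------  ----------------  --------------------------------
//   valid (shared)  yes               UpgradeReq (SCUpgradeReq if LLSC)
//   none            yes               ReadExReq
//   none            no                ReadReq
//   uncacheable, or a Writeback/upgrade that missed: NULL (forward as-is)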
template<class TagStore>
Tick
Cache<TagStore>::atomicAccess(PacketPtr pkt)
{
    int lat = hitLatency;

    // @TODO: make this a parameter
    bool last_level_cache = false;

    if (pkt->memInhibitAsserted()) {
        assert(!pkt->req->isUncacheable());
        // have to invalidate ourselves and any lower caches even if
        // the upper cache will be responding
        if (pkt->isInvalidate()) {
            BlkType *blk = tags->findBlock(pkt->getAddr());
            if (blk && blk->isValid()) {
                tags->invalidateBlk(blk);
                DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
                        pkt->cmdString(), pkt->getAddr());
            }
            if (!last_level_cache) {
                DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
                        pkt->cmdString(), pkt->getAddr());
                lat += memSidePort->sendAtomic(pkt);
            }
        } else {
            DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
                    pkt->cmdString(), pkt->getAddr());
        }

        return lat;
    }

    // should assert here that there are no outstanding MSHRs or
    // writebacks... that would mean that someone used an atomic
    // access in timing mode

    BlkType *blk = NULL;
    PacketList writebacks;

    if (!access(pkt, blk, lat, writebacks)) {
        // MISS
        PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());

        bool is_forward = (bus_pkt == NULL);

        if (is_forward) {
            // just forwarding the same request to the next level
            // no local cache operation involved
            bus_pkt = pkt;
        }

        DPRINTF(Cache, "Sending an atomic %s for %x\n",
                bus_pkt->cmdString(), bus_pkt->getAddr());

#if TRACING_ON
        CacheBlk::State old_state = blk ? blk->status : 0;
#endif

        lat += memSidePort->sendAtomic(bus_pkt);

        DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
                bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);

        assert(!bus_pkt->wasNacked());

        // If packet was a forward, the response (if any) is already
        // in place in the bus_pkt == pkt structure, so we don't need
        // to do anything. Otherwise, use the separate bus_pkt to
        // generate response to pkt and then delete it.
        if (!is_forward) {
            if (pkt->needsResponse()) {
                assert(bus_pkt->isResponse());
                if (bus_pkt->isError()) {
                    pkt->makeAtomicResponse();
                    pkt->copyError(bus_pkt);
                } else if (bus_pkt->isRead() ||
                           bus_pkt->cmd == MemCmd::UpgradeResp) {
                    // we're updating cache state to allow us to
                    // satisfy the upstream request from the cache
                    blk = handleFill(bus_pkt, blk, writebacks);
                    satisfyCpuSideRequest(pkt, blk);
                } else {
                    // we're satisfying the upstream request without
                    // modifying cache state, e.g., a write-through
                    pkt->makeAtomicResponse();
                }
            }
            delete bus_pkt;
        }
    }

    // Note that we don't invoke the prefetcher at all in atomic mode.
    // It's not clear how to do it properly, particularly for
    // prefetchers that aggressively generate prefetch candidates and
    // rely on bandwidth contention to throttle them; these will tend
    // to pollute the cache in atomic mode since there is no bandwidth
    // contention. If we ever do want to enable prefetching in atomic
    // mode, though, this is the place to do it... see timingAccess()
    // for an example (though we'd want to issue the prefetch(es)
    // immediately rather than calling requestMemSideBus() as we do
    // there).

    // Handle writebacks if needed
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        memSidePort->sendAtomic(wbPkt);
        writebacks.pop_front();
        delete wbPkt;
    }

    // We now have the block one way or another (hit or completed miss)

    if (pkt->needsResponse()) {
        pkt->makeAtomicResponse();
    }

    return lat;
}
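
// Caller's view of the atomic path (illustrative; 'port' stands for a
// hypothetical master port bound to this cache's cpu side):
//
//     Tick latency = port->sendAtomic(pkt);
//     // 'latency' includes hitLatency plus, on a miss, the latency
//     // returned by every sendAtomic() call further down the hierarchy.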
template<class TagStore>
void
Cache<TagStore>::functionalAccess(PacketPtr pkt,
                                  CachePort *incomingPort,
                                  CachePort *otherSidePort)
{
    Addr blk_addr = blockAlign(pkt->getAddr());
    BlkType *blk = tags->findBlock(pkt->getAddr());
    MSHR *mshr = mshrQueue.findMatch(blk_addr);

    pkt->pushLabel(name());

    CacheBlkPrintWrapper cbpw(blk);

    // Note that just because an L2/L3 has valid data doesn't mean an
    // L1 doesn't have a more up-to-date modified copy that still
    // needs to be found. As a result we always update the request if
    // we have it, but only declare it satisfied if we are the owner.

    // see if we have data at all (owned or otherwise)
    bool have_data = blk && blk->isValid()
        && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);

    // data we have is dirty if marked as such or if valid & ownership
    // pending due to outstanding UpgradeReq
    bool have_dirty =
        have_data && (blk->isDirty() ||
                      (mshr && mshr->inService && mshr->isPendingDirty()));

    bool done = have_dirty
        || incomingPort->checkFunctional(pkt)
        || mshrQueue.checkFunctional(pkt, blk_addr)
        || writeBuffer.checkFunctional(pkt, blk_addr)
        || otherSidePort->checkFunctional(pkt);

    DPRINTF(Cache, "functional %s %x %s%s%s\n",
            pkt->cmdString(), pkt->getAddr(),
            (blk && blk->isValid()) ? "valid " : "",
            have_data ? "data " : "", done ? "done " : "");

    // We're leaving the cache, so pop cache->name() label
    pkt->popLabel();

    if (done) {
        pkt->makeResponse();
    } else {
        otherSidePort->sendFunctional(pkt);
    }
}
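
// The 'done' chain above short-circuits left to right; the first
// component that fully satisfies the packet stops the search (sketch):
//
//   have_dirty      -> our block, but only if we are the owner
//   incomingPort    -> packets queued on the side the request came from
//   mshrQueue       -> data in outstanding misses
//   writeBuffer     -> data in pending writebacks
//   otherSidePort   -> packets queued on the far side
//
// Only if none of these succeed is the request forwarded onward.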
/////////////////////////////////////////////////////
//
// Response handling: responses from the memory side
//
/////////////////////////////////////////////////////
template<class TagStore>
void
Cache<TagStore>::handleResponse(PacketPtr pkt)
{
    Tick time = curTick + hitLatency;
    MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
    bool is_error = pkt->isError();

    assert(mshr);

    if (pkt->wasNacked()) {
        //pkt->reinitFromRequest();
        warn("NACKs from devices not connected to the same bus "
             "not implemented\n");
        return;
    }
    if (is_error) {
        DPRINTF(Cache, "Cache received packet with error for address %x, "
                "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
    }

    DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());

    MSHRQueue *mq = mshr->queue;
    bool wasFull = mq->isFull();

    if (mshr == noTargetMSHR) {
        // we always clear at least one target
        clearBlocked(Blocked_NoTargets);
        noTargetMSHR = NULL;
    }

    // Initial target is used just for stats
    MSHR::Target *initial_tgt = mshr->getTarget();
    BlkType *blk = tags->findBlock(pkt->getAddr());
    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
    Tick miss_latency = curTick - initial_tgt->recvTime;
    PacketList writebacks;

    if (pkt->req->isUncacheable()) {
        mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
            miss_latency;
    } else {
        mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
            miss_latency;
    }

    bool is_fill = !mshr->isForward &&
        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);

    if (is_fill && !is_error) {
        DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
                pkt->getAddr());

        // give mshr a chance to do some dirty work
        mshr->handleFill(pkt, blk);

        blk = handleFill(pkt, blk, writebacks);
    }

    // First offset for critical word first calculations
    int initial_offset = 0;

    if (mshr->hasTargets()) {
        initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
    }

    while (mshr->hasTargets()) {
        MSHR::Target *target = mshr->getTarget();

        switch (target->source) {
          case MSHR::Target::FromCPU:
            Tick completion_time;
            if (is_fill) {
                satisfyCpuSideRequest(target->pkt, blk,
                                      true, mshr->hasPostDowngrade());
                // How many bytes past the first request is this one
                int transfer_offset =
                    target->pkt->getOffset(blkSize) - initial_offset;
                if (transfer_offset < 0) {
                    transfer_offset += blkSize;
                }

                // If critical word (no offset) return first word time
                completion_time = tags->getHitLatency() +
                    (transfer_offset ? pkt->finishTime : pkt->firstWordTime);

                assert(!target->pkt->req->isUncacheable());
                missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
                    completion_time - target->recvTime;
            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                // failed StoreCond upgrade
                assert(target->pkt->cmd == MemCmd::StoreCondReq ||
                       target->pkt->cmd == MemCmd::StoreCondFailReq);
                completion_time = tags->getHitLatency() + pkt->finishTime;
                target->pkt->req->setExtraData(0);
            } else {
                // not a cache fill, just forwarding response
                completion_time = tags->getHitLatency() + pkt->finishTime;
                if (pkt->isRead() && !is_error) {
                    target->pkt->setData(pkt->getPtr<uint8_t>());
                }
            }
            target->pkt->makeTimingResponse();
            // if this packet is an error copy that to the new packet
            if (is_error)
                target->pkt->copyError(pkt);
            if (target->pkt->cmd == MemCmd::ReadResp &&
                (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
                // If intermediate cache got ReadRespWithInvalidate,
                // propagate that. Response should not have
                // isInvalidate() set otherwise.
                target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
            }
            cpuSidePort->respond(target->pkt, completion_time);
            break;

          case MSHR::Target::FromPrefetcher:
            assert(target->pkt->cmd == MemCmd::HardPFReq);
            if (blk)
                blk->status |= BlkHWPrefetched;
            delete target->pkt->req;
            delete target->pkt;
            break;

          case MSHR::Target::FromSnoop:
            // I don't believe that a snoop can be in an error state
            assert(!is_error);
            // response to snoop request
            DPRINTF(Cache, "processing deferred snoop...\n");
            assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
            handleSnoop(target->pkt, blk, true, true,
                        mshr->hasPostInvalidate());
            break;

          default:
            panic("Illegal target->source enum %d\n", target->source);
        }

        mshr->popTarget();
    }

    if (blk) {
        if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
            tags->invalidateBlk(blk);
        } else if (mshr->hasPostDowngrade()) {
            blk->status &= ~BlkWritable;
        }
    }

    if (mshr->promoteDeferredTargets()) {
        // avoid later read getting stale data while write miss is
        // outstanding... see comment in timingAccess()
        if (blk)
            blk->status &= ~BlkReadable;
        MSHRQueue *mq = mshr->queue;
        mq->markPending(mshr);
        requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
    } else {
        mq->deallocate(mshr);
        if (wasFull && !mq->isFull()) {
            clearBlocked((BlockedCause)mq->index);
        }
    }

    // copy writebacks to write buffer
    while (!writebacks.empty()) {
        PacketPtr wbPkt = writebacks.front();
        allocateWriteBuffer(wbPkt, time, true);
        writebacks.pop_front();
    }
    // if we used temp block, clear it out
    if (blk == tempBlock) {
        if (blk->isDirty()) {
            allocateWriteBuffer(writebackBlk(blk), time, true);
        }
        tags->invalidateBlk(blk);
    }

    delete pkt;
}
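
// Worked example (illustrative numbers) for the critical-word-first
// math above: with blkSize = 64 and the initial target at offset 16,
// a second target at offset 16 gets transfer_offset 0 and completes
// at pkt->firstWordTime, while a target at offset 0 gets
// transfer_offset (0 - 16 + 64) = 48 and must wait for pkt->finishTime.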
template<class TagStore>
PacketPtr
Cache<TagStore>::writebackBlk(BlkType *blk)
{
    assert(blk && blk->isValid() && blk->isDirty());

    writebacks[0/*pkt->req->threadId()*/]++;

    Request *writebackReq =
        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
    PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
    if (blk->isWritable()) {
        writeback->setSupplyExclusive();
    }
    writeback->allocate();
    std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);

    blk->status &= ~BlkDirty;
    return writeback;
}
template<class TagStore>
typename Cache<TagStore>::BlkType*
Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
{
    BlkType *blk = tags->findVictim(addr, writebacks);

    if (blk->isValid()) {
        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
        if (repl_mshr) {
            // must be an outstanding upgrade request on block
            // we're about to replace...
            assert(!blk->isWritable());
            assert(repl_mshr->needsExclusive());
            // too hard to replace block with transient state
            // allocation failed, block not inserted
            return NULL;
        } else {
            DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
                    repl_addr, addr,
                    blk->isDirty() ? "writeback" : "clean");

            if (blk->isDirty()) {
                // Save writeback packet for handling by caller
                writebacks.push_back(writebackBlk(blk));
            }
        }
    }

    return blk;
}
// Note that the reason we return a list of writebacks rather than
// inserting them directly in the write buffer is that this function
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
template<class TagStore>
typename Cache<TagStore>::BlkType*
Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
                            PacketList &writebacks)
{
    Addr addr = pkt->getAddr();
#if TRACING_ON
    CacheBlk::State old_state = blk ? blk->status : 0;
#endif

    if (blk == NULL) {
        // better have read new data...
        assert(pkt->hasData());
        // need to do a replacement
        blk = allocateBlock(addr, writebacks);
        if (blk == NULL) {
            // No replaceable block... just use temporary storage to
            // complete the current request and then get rid of it
            assert(!tempBlock->isValid());
            blk = tempBlock;
            tempBlock->set = tags->extractSet(addr);
            tempBlock->tag = tags->extractTag(addr);
            DPRINTF(Cache, "using temp block for %x\n", addr);
        } else {
            int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
            tags->insertBlock(pkt->getAddr(), blk, id);
        }

        // starting from scratch with a new block
        blk->status = 0;
    } else {
        // existing block... probably an upgrade
        assert(blk->tag == tags->extractTag(addr));
        // either we're getting new data or the block should already be valid
        assert(pkt->hasData() || blk->isValid());
        // don't clear block status... if block is already dirty we
        // don't want to lose that
    }

    blk->status |= BlkValid | BlkReadable;

    if (!pkt->sharedAsserted()) {
        blk->status |= BlkWritable;
        // If we got this via cache-to-cache transfer (i.e., from a
        // cache that was an owner) and took away that owner's copy,
        // then we need to write it back. Normally this happens
        // anyway as a side effect of getting a copy to write it, but
        // there are cases (such as failed store conditionals or
        // compare-and-swaps) where we'll demand an exclusive copy but
        // end up not writing it.
        if (pkt->memInhibitAsserted())
            blk->status |= BlkDirty;
    }

    DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
            addr, old_state, blk->status);

    // if we got new data, copy it in
    if (pkt->isRead()) {
        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
    }

    blk->whenReady = pkt->finishTime;

    return blk;
}
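
// Fill-state summary for the code above (sketch; assumes the usual
// MOESI-style reading of the status bits):
//
//   response seen                     resulting status bits
//   --------------------------------  -------------------------------
//   shared line asserted              BlkValid | BlkReadable
//   no shared line (clean exclusive)  ... | BlkWritable
//   no shared line + mem-inhibit      ... | BlkWritable | BlkDirty
//                                     (we inherit ownership)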
/////////////////////////////////////////////////////
//
// Snoop path: requests coming in from the memory side
//
/////////////////////////////////////////////////////
template<class TagStore>
void
Cache<TagStore>::doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
                                        bool already_copied, bool pending_inval)
{
    // timing-mode snoop responses require a new packet, unless we
    // already made a copy...
    PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
    assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
    pkt->allocate();
    pkt->makeTimingResponse();
    if (pkt->isRead()) {
        pkt->setDataFromBlock(blk_data, blkSize);
    }
    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
        // Assume we defer a response to a read from a far-away cache
        // A, then later defer a ReadExcl from a cache B on the same
        // bus as us. We'll assert MemInhibit in both cases, but in
        // the latter case MemInhibit will keep the invalidation from
        // reaching cache A. This special response tells cache A that
        // it gets the block to satisfy its read, but must immediately
        // invalidate it.
        pkt->cmd = MemCmd::ReadRespWithInvalidate;
    }
    memSidePort->respond(pkt, curTick + hitLatency);
}
template<class TagStore>
void
Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
                             bool is_timing, bool is_deferred,
                             bool pending_inval)
{
    // deferred snoops can only happen in timing mode
    assert(!(is_deferred && !is_timing));
    // pending_inval only makes sense on deferred snoops
    assert(!(pending_inval && !is_deferred));
    assert(pkt->isRequest());

    // the packet may get modified if we or a forwarded snooper
    // responds in atomic mode, so remember a few things about the
    // original packet up front
    bool invalidate = pkt->isInvalidate();
    bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();

    if (forwardSnoops) {
        // first propagate snoop upward to see if anyone above us wants to
        // handle it. save & restore packet src since it will get
        // rewritten to be relative to cpu-side bus (if any)
        bool alreadyResponded = pkt->memInhibitAsserted();
        if (is_timing) {
            Packet *snoopPkt = new Packet(pkt, true);  // clear flags
            snoopPkt->setExpressSnoop();
            snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
            cpuSidePort->sendTiming(snoopPkt);
            if (snoopPkt->memInhibitAsserted()) {
                // cache-to-cache response from some upper cache
                assert(!alreadyResponded);
                pkt->assertMemInhibit();
            } else {
                delete snoopPkt->senderState;
            }
            if (snoopPkt->sharedAsserted()) {
                pkt->assertShared();
            }
            delete snoopPkt;
        } else {
            int origSrc = pkt->getSrc();
            cpuSidePort->sendAtomic(pkt);
            if (!alreadyResponded && pkt->memInhibitAsserted()) {
                // cache-to-cache response from some upper cache:
                // forward response to original requester
                assert(pkt->isResponse());
            }
            pkt->setSrc(origSrc);
        }
    }

    if (!blk || !blk->isValid()) {
        return;
    }

    // we may end up modifying both the block state and the packet (if
    // we respond in atomic mode), so just figure out what to do now
    // and then do it later
    bool respond = blk->isDirty() && pkt->needsResponse();
    bool have_exclusive = blk->isWritable();

    if (pkt->isRead() && !invalidate) {
        assert(!needs_exclusive);
        pkt->assertShared();
        int bits_to_clear = BlkWritable;
        const bool haveOwnershipState = true; // for now
        if (!haveOwnershipState) {
            // if we don't support pure ownership (dirty && !writable),
            // have to clear dirty bit here, assume memory snarfs data
            // on cache-to-cache xfer
            bits_to_clear |= BlkDirty;
        }
        blk->status &= ~bits_to_clear;
    }

    DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
            pkt->cmdString(), blockAlign(pkt->getAddr()),
            respond ? "responding, " : "", invalidate ? 0 : blk->status);

    if (respond) {
        assert(!pkt->memInhibitAsserted());
        pkt->assertMemInhibit();
        if (have_exclusive) {
            pkt->setSupplyExclusive();
        }
        if (is_timing) {
            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
        } else {
            pkt->makeAtomicResponse();
            pkt->setDataFromBlock(blk->data, blkSize);
        }
    } else if (is_timing && is_deferred) {
        // if it's a deferred timing snoop then we've made a copy of
        // the packet, and so if we're not using that copy to respond
        // then we need to delete it here.
        delete pkt;
    }

    // Do this last in case it deallocates block data or something
    // like that
    if (invalidate) {
        tags->invalidateBlk(blk);
    }
}
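
// Snoop outcome summary for a valid block (a sketch of the logic above):
//
//   snoop type          block dirty?   actions taken
//   ------------------  -------------  ------------------------------
//   read, no inval      yes            respond (inhibit), assert
//                                      shared, clear writable
//   read, no inval      no             assert shared, clear writable
//   invalidate/ReadEx   yes            respond (inhibit), invalidate
//   invalidate/ReadEx   no             invalidate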
template<class TagStore>
void
Cache<TagStore>::snoopTiming(PacketPtr pkt)
{
    // Note that some deferred snoops don't have requests, since the
    // original access may have already completed
    if ((pkt->req && pkt->req->isUncacheable()) ||
        pkt->cmd == MemCmd::Writeback) {
        // Can't get a hit on an uncacheable address
        // Revisit this for multi-level coherence
        return;
    }

    BlkType *blk = tags->findBlock(pkt->getAddr());

    Addr blk_addr = blockAlign(pkt->getAddr());
    MSHR *mshr = mshrQueue.findMatch(blk_addr);

    // Let the MSHR itself track the snoop and decide whether we want
    // to go ahead and do the regular cache snoop
    if (mshr && mshr->handleSnoop(pkt, order++)) {
        DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
                blk_addr);
        if (mshr->getNumTargets() > numTarget)
            warn("allocating bonus target for snoop"); //handle later
        return;
    }

    // We also need to check the writeback buffers and handle those
    std::vector<MSHR *> writebacks;
    if (writeBuffer.findMatches(blk_addr, writebacks)) {
        DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
                pkt->getAddr());

        // Look through writebacks for any non-uncacheable writes, use that
        for (int i = 0; i < writebacks.size(); i++) {
            mshr = writebacks[i];
            assert(!mshr->isUncacheable());
            assert(mshr->getNumTargets() == 1);
            PacketPtr wb_pkt = mshr->getTarget()->pkt;
            assert(wb_pkt->cmd == MemCmd::Writeback);

            assert(!pkt->memInhibitAsserted());
            pkt->assertMemInhibit();
            if (!pkt->needsExclusive()) {
                pkt->assertShared();
                // the writeback is no longer the exclusive copy in the system
                wb_pkt->clearSupplyExclusive();
            } else {
                // if we're not asserting the shared line, we need to
                // invalidate our copy. we'll do that below as long as
                // the packet's invalidate flag is set...
                assert(pkt->isInvalidate());
            }
            doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
                                   false, false);

            if (pkt->isInvalidate()) {
                // Invalidation trumps our writeback... discard here
                markInService(mshr);
                delete wb_pkt;
            }

            // If this was a shared writeback, there may still be
            // other shared copies above that require invalidation.
            // We could be more selective and return here if the
            // request is non-exclusive or if the writeback is
            // exclusive.
            break;
        }
    }

    handleSnoop(pkt, blk, true, false, false);
}
template<class TagStore>
Tick
Cache<TagStore>::snoopAtomic(PacketPtr pkt)
{
    if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
        // Can't get a hit on an uncacheable address
        // Revisit this for multi-level coherence
        return hitLatency;
    }

    BlkType *blk = tags->findBlock(pkt->getAddr());
    handleSnoop(pkt, blk, false, false, false);
    return hitLatency;
}
template<class TagStore>
MSHR *
Cache<TagStore>::getNextMSHR()
{
    // Check both MSHR queue and write buffer for potential requests
    MSHR *miss_mshr  = mshrQueue.getNextMSHR();
    MSHR *write_mshr = writeBuffer.getNextMSHR();

    // Now figure out which one to send... some cases are easy
    if (miss_mshr && !write_mshr) {
        return miss_mshr;
    }
    if (write_mshr && !miss_mshr) {
        return write_mshr;
    }

    if (miss_mshr && write_mshr) {
        // We have one of each... normally we favor the miss request
        // unless the write buffer is full
        if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
            // Write buffer is full, so we'd like to issue a write;
            // need to search MSHR queue for conflicting earlier miss.
            MSHR *conflict_mshr =
                mshrQueue.findPending(write_mshr->addr, write_mshr->size);

            if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
                // Service misses in order until conflict is cleared.
                return conflict_mshr;
            }

            // No conflicts; issue write
            return write_mshr;
        } else {
            // Write buffer isn't full, but need to check it for
            // conflicting earlier writeback
            MSHR *conflict_mshr =
                writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
            if (conflict_mshr) {
                // not sure why we don't check order here... it was in the
                // original code but commented out.

                // The only way this happens is if we are
                // doing a write and we didn't have permissions
                // then subsequently saw a writeback (owned got evicted).
                // We need to make sure to perform the writeback first
                // to preserve the dirty data, then we can issue the write.

                // should we return write_mshr here instead? I.e. do we
                // have to flush writes in order? I don't think so... not
                // for Alpha anyway. Maybe for x86?
                return conflict_mshr;
            }

            // No conflicts; issue read
            return miss_mshr;
        }
    }

    // fall through... no pending requests. Try a prefetch.
    assert(!miss_mshr && !write_mshr);
    if (prefetcher && !mshrQueue.isFull()) {
        // If we have a miss queue slot, we can try a prefetch
        PacketPtr pkt = prefetcher->getPacket();
        if (pkt) {
            Addr pf_addr = blockAlign(pkt->getAddr());
            if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
                // Update statistic on number of prefetches issued
                // (hwpf_mshr_misses)
                mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
                // Don't request bus, since we already have it
                return allocateMissBuffer(pkt, curTick, false);
            }
        }
    }

    return NULL;
}
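
// Arbitration order implemented above (a sketch):
//   1. only one queue has a ready entry: take it
//   2. both ready: favor the miss, unless the write buffer is full and
//      idle, in which case drain a write (still honoring any older
//      conflicting miss); in the normal case an older conflicting
//      writeback is issued before the miss
//   3. neither ready: let the prefetcher try, if an MSHR slot is free
//      and the line is neither cached nor already outstanding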
template<class TagStore>
PacketPtr
Cache<TagStore>::getTimingPacket()
{
    MSHR *mshr = getNextMSHR();

    if (mshr == NULL) {
        return NULL;
    }

    // use request from 1st target
    PacketPtr tgt_pkt = mshr->getTarget()->pkt;
    PacketPtr pkt = NULL;

    if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
        tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
        // SCUpgradeReq or StoreCondReq saw invalidation while queued
        // in MSHR, so now that we are getting around to processing
        // it, just treat it as if we got a failure response
        pkt = new Packet(tgt_pkt);
        pkt->cmd = MemCmd::UpgradeFailResp;
        pkt->senderState = mshr;
        pkt->firstWordTime = pkt->finishTime = curTick;
        handleResponse(pkt);
        return NULL;
    } else if (mshr->isForwardNoResponse()) {
        // no response expected, just forward packet as it is
        assert(tags->findBlock(mshr->addr) == NULL);
        pkt = tgt_pkt;
    } else {
        BlkType *blk = tags->findBlock(mshr->addr);
        pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());

        mshr->isForward = (pkt == NULL);

        if (mshr->isForward) {
            // not a cache block request, but a response is expected
            // make copy of current packet to forward, keep current
            // copy for response handling
            pkt = new Packet(tgt_pkt);
            pkt->allocate();
            if (pkt->isWrite()) {
                pkt->setData(tgt_pkt->getPtr<uint8_t>());
            }
        }
    }

    assert(pkt != NULL);
    pkt->senderState = mshr;
    return pkt;
}
template<class TagStore>
Tick
Cache<TagStore>::nextMSHRReadyTime()
{
    Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
                              writeBuffer.nextMSHRReadyTime());

    if (prefetcher) {
        nextReady = std::min(nextReady,
                             prefetcher->nextPrefetchReadyTime());
    }

    return nextReady;
}


/////////////////////////////////////////////////////
//
// CpuSidePort
//
/////////////////////////////////////////////////////
template<class TagStore>
void
Cache<TagStore>::CpuSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    // CPU side port doesn't snoop; it's a target only. It can
    // potentially respond to any address.
    snoop = false;
    resp.push_back(myCache()->getAddrRange());
}

template<class TagStore>
bool
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
{
    // illegal to block responses... can lead to deadlock
    if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
        DPRINTF(Cache, "Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }

    myCache()->timingAccess(pkt);
    return true;
}

template<class TagStore>
Tick
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
{
    return myCache()->atomicAccess(pkt);
}

template<class TagStore>
void
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
{
    myCache()->functionalAccess(pkt, this, otherPort);
}

template<class TagStore>
Cache<TagStore>::
CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
                         const std::string &_label)
    : BaseCache::CachePort(_name, _cache, _label)
{
}


/////////////////////////////////////////////////////
//
// MemSidePort
//
/////////////////////////////////////////////////////
template<class TagStore>
void
Cache<TagStore>::MemSidePort::
getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
{
    // Memory-side port always snoops, but never passes requests
    // through to targets on the cpu side (so we don't add anything to
    // the address range list).
    snoop = true;
}

template<class TagStore>
bool
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
{
    // this needs to be fixed so that the cache updates the mshr and sends the
    // packet back out on the link, but it probably won't happen so until this
    // gets fixed, just panic when it does
    if (pkt->wasNacked())
        panic("Need to implement cache resending nacked packets!\n");

    if (pkt->isRequest() && blocked) {
        DPRINTF(Cache, "Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }

    if (pkt->isResponse()) {
        myCache()->handleResponse(pkt);
    } else {
        myCache()->snoopTiming(pkt);
    }
    return true;
}

template<class TagStore>
Tick
Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
{
    // in atomic mode, responses go back to the sender via the
    // function return from sendAtomic(), not via a separate
    // sendAtomic() from the responder. Thus we should never see a
    // response packet in recvAtomic() (anywhere, not just here).
    assert(!pkt->isResponse());
    return myCache()->snoopAtomic(pkt);
}

template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
{
    myCache()->functionalAccess(pkt, this, otherPort);
}
template<class TagStore>
void
Cache<TagStore>::MemSidePort::sendPacket()
{
    // if we have responses that are ready, they take precedence
    if (deferredPacketReady()) {
        bool success = sendTiming(transmitList.front().pkt);

        if (success) {
            //send successful, remove packet
            transmitList.pop_front();
        }

        waitingOnRetry = !success;
    } else {
        // check for non-response packets (requests & writebacks)
        PacketPtr pkt = myCache()->getTimingPacket();
        if (pkt == NULL) {
            // can happen if e.g. we attempt a writeback and fail, but
            // before the retry, the writeback is eliminated because
            // we snoop another cache's ReadEx.
            waitingOnRetry = false;
        } else {
            MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);

            bool success = sendTiming(pkt);

            waitingOnRetry = !success;
            if (waitingOnRetry) {
                DPRINTF(CachePort, "now waiting on a retry\n");
                if (!mshr->isForwardNoResponse()) {
                    delete pkt;
                }
            } else {
                myCache()->markInService(mshr, pkt);
            }
        }
    }

    // tried to send packet... if it was successful (no retry), see if
    // we need to rerequest bus or not
    if (!waitingOnRetry) {
        Tick nextReady = std::min(deferredPacketReadyTime(),
                                  myCache()->nextMSHRReadyTime());
        // @TODO: need to factor in prefetch requests here somehow
        if (nextReady != MaxTick) {
            DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
            schedule(sendEvent, std::max(nextReady, curTick + 1));
        } else {
            // no more to send right now: if we're draining, we may be done
            if (drainEvent && !sendEvent->scheduled()) {
                drainEvent->process();
                drainEvent = NULL;
            }
        }
    }
}
template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvRetry()
{
    assert(waitingOnRetry);
    sendPacket();
}

template<class TagStore>
void
Cache<TagStore>::MemSidePort::processSendEvent()
{
    assert(!waitingOnRetry);
    sendPacket();
}

template<class TagStore>
Cache<TagStore>::
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
                         const std::string &_label)
    : BaseCache::CachePort(_name, _cache, _label)
{
    // override default send event from SimpleTimingPort
    delete sendEvent;
    sendEvent = new SendEvent(this);
}