2 * Copyright (c) 2010-2012 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * Authors: Erik Hallnor
53 #include "base/misc.hh"
54 #include "base/range.hh"
55 #include "base/types.hh"
56 #include "debug/Cache.hh"
57 #include "debug/CachePort.hh"
58 #include "mem/cache/prefetch/base.hh"
59 #include "mem/cache/blk.hh"
60 #include "mem/cache/cache.hh"
61 #include "mem/cache/mshr.hh"
62 #include "sim/sim_exit.hh"
// Cache constructor: records prefetcher configuration, allocates the
// temporary block (tempBlock) used to complete fills when no replaceable
// block is available, creates the CPU-side and memory-side ports, and
// points the prefetcher back at this cache.
// NOTE(review): this listing has gaps (missing init-list/brace lines were
// dropped by extraction); code lines left byte-identical.
64 template<class TagStore>
65 Cache<TagStore>::Cache(const Params *p, TagStore *tags)
68     prefetcher(p->prefetcher),
70     prefetchOnAccess(p->prefetch_on_access)
72     tempBlock = new BlkType();
// tempBlock gets its own data buffer since it is not backed by the tag store
73     tempBlock->data = new uint8_t[blkSize];
75     cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
77     memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
82         prefetcher->setCache(this);
// Register statistics: delegate to the base cache, then to the tag store
// (tags are owned here, so their stats are registered under this cache's name).
85 template<class TagStore>
87 Cache<TagStore>::regStats()
89     BaseCache::regStats();
90     tags->regStats(name());
// Perform an atomic swap or compare-and-swap (MemCmd::SwapReq) directly on
// the cache block's data. The packet's write value is exchanged with the
// current memory contents; for conditional swaps (isCondSwap) the write
// only happens if the current contents match the request's extra data.
// Marks the block dirty when memory is overwritten.
93 template<class TagStore>
95 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
97     uint64_t overwrite_val;
99     uint64_t condition_val64;
100     uint32_t condition_val32;
// locate the target bytes within the block
102     int offset = tags->extractBlkOffset(pkt->getAddr());
103     uint8_t *blk_data = blk->data + offset;
// swaps are at most 64 bits wide
105     assert(sizeof(uint64_t) >= pkt->getSize());
107     overwrite_mem = true;
108     // keep a copy of our possible write value, and copy what is at the
109     // memory address into the packet
110     pkt->writeData((uint8_t *)&overwrite_val);
111     pkt->setData(blk_data);
113     if (pkt->req->isCondSwap()) {
114         if (pkt->getSize() == sizeof(uint64_t)) {
115             condition_val64 = pkt->req->getExtraData();
// overwrite only when current contents equal the condition value
116             overwrite_mem = !std::memcmp(&condition_val64, blk_data,
118         } else if (pkt->getSize() == sizeof(uint32_t)) {
119             condition_val32 = (uint32_t)pkt->req->getExtraData();
120             overwrite_mem = !std::memcmp(&condition_val32, blk_data,
123             panic("Invalid size for conditional read/write\n");
127         std::memcpy(blk_data, &overwrite_val, pkt->getSize());
128         blk->status |= BlkDirty;
// Satisfy a CPU-side request from a valid block already in the cache:
// dispatches RMW (SwapReq) to cmpAndSwap, applies writes (with LL/SC
// checkWrite), services reads (including whole-block coherent reads from
// upper-level caches, deciding dirty/exclusive hand-off), and acks upgrades.
// deferred_response: responding after our own miss (from handleResponse);
// pending_downgrade: a pending snoop will downgrade this block.
133 template<class TagStore>
135 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
136                                        bool deferred_response,
137                                        bool pending_downgrade)
139     assert(blk && blk->isValid());
140     // Occasionally this is not true... if we are a lower-level cache
141     // satisfying a string of Read and ReadEx requests from
142     // upper-level caches, a Read will mark the block as shared but we
143     // can satisfy a following ReadEx anyway since we can rely on the
144     // Read requester(s) to have buffered the ReadEx snoop and to
145     // invalidate their blocks after receiving them.
146     // assert(!pkt->needsExclusive() || blk->isWritable());
147     assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
149     // Check RMW operations first since both isRead() and
150     // isWrite() will be true for them
151     if (pkt->cmd == MemCmd::SwapReq) {
152         cmpAndSwap(blk, pkt);
153     } else if (pkt->isWrite()) {
// checkWrite enforces LL/SC semantics; only write if the SC may proceed
154         if (blk->checkWrite(pkt)) {
155             pkt->writeDataToBlock(blk->data, blkSize);
156             blk->status |= BlkDirty;
158     } else if (pkt->isRead()) {
160             blk->trackLoadLocked(pkt);
162         pkt->setDataFromBlock(blk->data, blkSize);
163         if (pkt->getSize() == blkSize) {
164             // special handling for coherent block requests from
165             // upper-level caches
166             if (pkt->needsExclusive()) {
167                 // if we have a dirty copy, make sure the recipient
168                 // keeps it marked dirty
169                 if (blk->isDirty()) {
170                     pkt->assertMemInhibit();
172                 // on ReadExReq we give up our copy unconditionally
173                 tags->invalidateBlk(blk);
174             } else if (blk->isWritable() && !pending_downgrade
175                       && !pkt->sharedAsserted()) {
176                 // we can give the requester an exclusive copy (by not
177                 // asserting shared line) on a read request if:
178                 // - we have an exclusive copy at this level (& below)
179                 // - we don't have a pending snoop from below
180                 //   signaling another read request
181                 // - no other cache above has a copy (otherwise it
182                 //   would have asserted shared line on request)
184                 if (blk->isDirty()) {
185                     // special considerations if we're owner:
186                     if (!deferred_response && !isTopLevel) {
187                         // if we are responding immediately and can
188                         // signal that we're transferring ownership
189                         // along with exclusivity, do so
190                         pkt->assertMemInhibit();
191                         blk->status &= ~BlkDirty;
193                         // if we're responding after our own miss,
194                         // there's a window where the recipient didn't
195                         // know it was getting ownership and may not
196                         // have responded to snoops correctly, so we
197                         // can't pass off ownership *or* exclusivity
202                 // otherwise only respond with a shared copy
207         // Not a read or write... must be an upgrade.  it's OK
208         // to just ack those as long as we have an exclusive
209         // copy at this level.
210         assert(pkt->isUpgrade());
211         tags->invalidateBlk(blk);
216 /////////////////////////////////////////////////////
218 // MSHR helper functions
220 /////////////////////////////////////////////////////
// Mark an MSHR as in service (request sent to the memory side). For
// hardware-prefetch MSHRs, drop the prefetcher's bus request if it has
// no further pending prefetches.
223 template<class TagStore>
225 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
227     markInServiceInternal(mshr, pkt);
229     if (mshr->originalCmd == MemCmd::HardPFReq) {
230         DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
232         //Also clear pending if need be
233         if (!prefetcher->havePending())
235             deassertMemSideBusRequest(Request_PF);
// Squash all outstanding misses for a thread: remember whether we were
// blocked (no-targets on the noTargetMSHR, or a full MSHR queue), squash
// the thread's MSHRs, and unblock if the squash freed the resource.
242 template<class TagStore>
244 Cache<TagStore>::squash(int threadNum)
246     bool unblock = false;
247     BlockedCause cause = NUM_BLOCKED_CAUSES;
249     if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
252         cause = Blocked_NoTargets;
254     if (mshrQueue.isFull()) {
256         cause = Blocked_NoMSHRs;
258     mshrQueue.squash(threadNum);
// only clear the blocked state if squashing actually freed capacity
259     if (unblock && !mshrQueue.isFull()) {
264 /////////////////////////////////////////////////////
266 // Access path: requests coming in from the CPU side
268 /////////////////////////////////////////////////////
// Core lookup shared by timing and atomic paths. Looks up the block (blk
// is an out-parameter, lat accumulates access latency), satisfies hits in
// place, and handles the special cases: uncacheable accesses (invalidate
// any stale copy on a write), Writeback packets (which may allocate a
// block without needing a writable copy), and failed store-conditionals.
// Returns whether the access was satisfied (return sites not all visible
// in this extraction).
270 template<class TagStore>
272 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
273                         int &lat, PacketList &writebacks)
275     if (pkt->req->isUncacheable()) {
276         if (pkt->req->isClearLL()) {
278         } else if (pkt->isWrite()) {
// an uncacheable write must not leave a stale cached copy behind
279             blk = tags->findBlock(pkt->getAddr());
281                 tags->invalidateBlk(blk);
290     int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
291     blk = tags->accessBlock(pkt->getAddr(), lat, id);
293     DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
294             pkt->req->isInstFetch() ? " (ifetch)" : "",
295             pkt->getAddr(), (blk) ? "hit" : "miss");
// hit requires writable for exclusive requests, readable otherwise
299         if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
300             // OK to satisfy access
302             satisfyCpuSideRequest(pkt, blk);
307     // Can't satisfy access normally... either no block (blk == NULL)
308     // or have block but need exclusive & only have shared.
310     // Writeback handling is special case.  We can write the block
311     // into the cache without having a writeable copy (or any copy at
313     if (pkt->cmd == MemCmd::Writeback) {
314         assert(blkSize == pkt->getSize());
316             // need to do a replacement
317             blk = allocateBlock(pkt->getAddr(), writebacks);
319                 // no replaceable block available, give up.
320                 // writeback will be forwarded to next level.
324             int id = pkt->req->masterId();
325             tags->insertBlock(pkt->getAddr(), blk, id);
326             blk->status = BlkValid | BlkReadable;
328         std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
329         blk->status |= BlkDirty;
// supply-exclusive writebacks carry exclusivity down with the data
330         if (pkt->isSupplyExclusive()) {
331             blk->status |= BlkWritable;
333         // nothing else to do; writeback doesn't expect response
334         assert(!pkt->needsResponse());
341     if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
342         // complete miss on store conditional... just give up now
343         pkt->req->setExtraData(0);
// SenderState attached to snoop packets forwarded up the CPU side; saves
// the packet's previous sender state and source so both can be restored
// when the response comes back down.
351 class ForwardResponseRecord : public Packet::SenderState
// previous senderState, reinstated by restore()
353     Packet::SenderState *prevSenderState;
// capture state at forward time
359     ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
360         : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
// undo: reinstate sender state and route the response back to the saved source
365     void restore(Packet *pkt, BaseCache *_cache)
367         assert(_cache == cache);
368         pkt->senderState = prevSenderState;
369         pkt->setDest(prevSrc);
// Timing-mode request entry point from the CPU side. Handles, in order:
// deferred packet deletion, forwarded prefetch responses (FRR), requests
// another cache is already answering (mem-inhibited), uncacheable
// accesses, then the normal hit path (respond after hitLatency) or miss
// path (coalesce into an existing MSHR or allocate a new one). Also
// notifies the prefetcher and drains any writebacks produced by access().
374 template<class TagStore>
376 Cache<TagStore>::timingAccess(PacketPtr pkt)
378     //@todo Add back in MemDebug Calls
379 //    MemDebug::cacheAccess(pkt);
382     /// @todo temporary hack to deal with memory corruption issue until
383     /// 4-phase transactions are complete
384     for (int x = 0; x < pendingDelete.size(); x++)
385         delete pendingDelete[x];
386     pendingDelete.clear();
388     // we charge hitLatency for doing just about anything here
389     Tick time =  curTick() + hitLatency;
391     if (pkt->isResponse()) {
392         // must be cache-to-cache response from upper to lower level
393         ForwardResponseRecord *rec =
394             dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
397             assert(pkt->cmd == MemCmd::HardPFResp);
398             // Check if it's a prefetch response and handle it. We shouldn't
399             // get any other kinds of responses without FRRs.
400             DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
406         rec->restore(pkt, this);
408         memSidePort->respond(pkt, time);
412     assert(pkt->isRequest());
414     if (pkt->memInhibitAsserted()) {
415         DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
417         assert(!pkt->req->isUncacheable());
418         // Special tweak for multilevel coherence: snoop downward here
419         // on invalidates since there may be other caches below here
420         // that have shared copies.  Not necessary if we know that
421         // supplier had exclusive copy to begin with.
422         if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
423             Packet *snoopPkt = new Packet(pkt, true);  // clear flags
424             snoopPkt->setExpressSnoop();
425             snoopPkt->assertMemInhibit();
426             memSidePort->sendTimingReq(snoopPkt);
427             // main memory will delete snoopPkt
429         // since we're the official target but we aren't responding,
430         // delete the packet now.
432         /// @todo nominally we should just delete the packet here,
433         /// however, until 4-phase stuff we can't because sending
434         /// cache is still relying on it
435         pendingDelete.push_back(pkt);
439     if (pkt->req->isUncacheable()) {
440         if (pkt->req->isClearLL()) {
442         } else if (pkt->isWrite()) {
// invalidate any stale cached copy before the uncacheable write proceeds
443             BlkType *blk = tags->findBlock(pkt->getAddr());
445                 tags->invalidateBlk(blk);
449         // writes go in write buffer, reads use MSHR
450         if (pkt->isWrite() && !pkt->isRead()) {
451             allocateWriteBuffer(pkt, time, true);
453             allocateUncachedReadBuffer(pkt, time, true);
455             assert(pkt->needsResponse()); // else we should delete it here??
// ---- normal (cacheable) path ----
459     int lat = hitLatency;
461     PacketList writebacks;
463     bool satisfied = access(pkt, blk, lat, writebacks);
466 /** @todo make the fast write alloc (wh64) work with coherence. */
468     // If this is a block size write/hint (WH64) allocate the block here
469     // if the coherence protocol allows it.
470     if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
471         (pkt->cmd == MemCmd::WriteReq
472          || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
473         // not outstanding misses, can do this
474         MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
475         if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
476             if (outstanding_miss) {
477                 warn("WriteInv doing a fastallocate"
478                      "with an outstanding miss to the same address\n");
480             blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
487     // track time of availability of next prefetch, if any
488     Tick next_pf_time = 0;
490     bool needsResponse = pkt->needsResponse();
// hit: clear the prefetched mark and let the prefetcher observe the access
493         if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
495                 blk->status &= ~BlkHWPrefetched;
496             next_pf_time = prefetcher->notify(pkt, time);
500             pkt->makeTimingResponse();
501             cpuSidePort->respond(pkt, curTick()+lat);
503             /// @todo nominally we should just delete the packet here,
504             /// however, until 4-phase stuff we can't because sending
505             /// cache is still relying on it
506             pendingDelete.push_back(pkt);
// miss: look for an MSHR already covering this block
511         Addr blk_addr = blockAlign(pkt->getAddr());
512         MSHR *mshr = mshrQueue.findMatch(blk_addr);
516             //@todo remove hw_pf here
517             assert(pkt->req->masterId() < system->maxMasters());
518             mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
519             if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
520                 mshr->threadNum = -1;
// coalesce this request as another target on the existing MSHR
522             mshr->allocateTarget(pkt, time, order++);
523             if (mshr->getNumTargets() == numTarget) {
525                 setBlocked(Blocked_NoTargets);
526                 // need to be careful with this... if this mshr isn't
527                 // ready yet (i.e. time > curTick()_, we don't want to
528                 // move it ahead of mshrs that are ready
529                 // mshrQueue.moveToFront(mshr);
533             assert(pkt->req->masterId() < system->maxMasters());
534             mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
535             // always mark as cache fill for now... if we implement
536             // no-write-allocate or bypass accesses this will have to
538             if (pkt->cmd == MemCmd::Writeback) {
539                 allocateWriteBuffer(pkt, time, true);
541                 if (blk && blk->isValid()) {
542                     // If we have a write miss to a valid block, we
543                     // need to mark the block non-readable.  Otherwise
544                     // if we allow reads while there's an outstanding
545                     // write miss, the read could return stale data
546                     // out of the cache block... a more aggressive
547                     // system could detect the overlap (if any) and
548                     // forward data out of the MSHRs, but we don't do
549                     // that yet.  Note that we do need to leave the
550                     // block valid so that it stays in the cache, in
551                     // case we get an upgrade response (and hence no
552                     // new data) when the write miss completes.
553                     // As long as CPUs do proper store/load forwarding
554                     // internally, and have a sufficiently weak memory
555                     // model, this is probably unnecessary, but at some
556                     // point it must have seemed like we needed it...
557                     assert(pkt->needsExclusive() && !blk->isWritable());
558                     blk->status &= ~BlkReadable;
561                 allocateMissBuffer(pkt, time, true);
565                 next_pf_time = prefetcher->notify(pkt, time);
570     if (next_pf_time != 0)
571         requestMemSideBus(Request_PF, std::max(time, next_pf_time));
573     // copy writebacks to write buffer
574     while (!writebacks.empty()) {
575         PacketPtr wbPkt = writebacks.front();
576         allocateWriteBuffer(wbPkt, time, true);
577         writebacks.pop_front();
584 // See comment in cache.hh.
// Build the packet to send down the memory-side bus for a CPU-side miss.
// Returns NULL when the original packet should simply be forwarded
// (uncacheable accesses, unallocated writebacks, missed upgrades);
// otherwise chooses Upgrade/SCUpgrade for a valid-but-shared block that
// needs exclusivity, or ReadExReq/ReadReq for a plain fill.
585 template<class TagStore>
587 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
590     bool blkValid = blk && blk->isValid();
592     if (cpu_pkt->req->isUncacheable()) {
593         //assert(blk == NULL);
598         (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
599         // Writebacks that weren't allocated in access() and upgrades
600         // from upper-level caches that missed completely just go
605     assert(cpu_pkt->needsResponse());
608     // @TODO make useUpgrades a parameter.
609     // Note that ownership protocols require upgrade, otherwise a
610     // write miss on a shared owned block will generate a ReadExcl,
611     // which will clobber the owned copy.
612     const bool useUpgrades = true;
613     if (blkValid && useUpgrades) {
614         // only reason to be here is that blk is shared
615         // (read-only) and we need exclusive
616         assert(needsExclusive && !blk->isWritable());
617         cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
620         cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
// new request on the same Request object, sized to a full block
622     PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
// Atomic-mode access: like timingAccess but synchronous — latency is
// accumulated and returned rather than scheduled. Handles mem-inhibited
// requests (invalidate and forward downward), services hits via access(),
// and on a miss sends a bus packet atomically, fills from the response,
// and performs writebacks inline.
629 template<class TagStore>
631 Cache<TagStore>::atomicAccess(PacketPtr pkt)
633     int lat = hitLatency;
635     // @TODO: make this a parameter
636     bool last_level_cache = false;
638     if (pkt->memInhibitAsserted()) {
639         assert(!pkt->req->isUncacheable());
640         // have to invalidate ourselves and any lower caches even if
641         // upper cache will be responding
642         if (pkt->isInvalidate()) {
643             BlkType *blk = tags->findBlock(pkt->getAddr());
644             if (blk && blk->isValid()) {
645                 tags->invalidateBlk(blk);
646                 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
647                         pkt->cmdString(), pkt->getAddr());
// propagate the invalidation to lower levels unless we're last-level
649             if (!last_level_cache) {
650                 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
651                         pkt->cmdString(), pkt->getAddr());
652                 lat += memSidePort->sendAtomic(pkt);
655             DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
656                     pkt->cmdString(), pkt->getAddr());
662     // should assert here that there are no outstanding MSHRs or
663     // writebacks... that would mean that someone used an atomic
664     // access in timing mode
667     PacketList writebacks;
// miss path: build a bus packet (NULL means plain forward)
669     if (!access(pkt, blk, lat, writebacks)) {
671         PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
673         bool is_forward = (bus_pkt == NULL);
676             // just forwarding the same request to the next level
677             // no local cache operation involved
681             DPRINTF(Cache, "Sending an atomic %s for %x\n",
682                     bus_pkt->cmdString(), bus_pkt->getAddr());
685         CacheBlk::State old_state = blk ? blk->status : 0;
688         lat += memSidePort->sendAtomic(bus_pkt);
690         DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
691                 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
693         assert(!bus_pkt->wasNacked());
695         // If packet was a forward, the response (if any) is already
696         // in place in the bus_pkt == pkt structure, so we don't need
697         // to do anything.  Otherwise, use the separate bus_pkt to
698         // generate response to pkt and then delete it.
700             if (pkt->needsResponse()) {
701                 assert(bus_pkt->isResponse());
702                 if (bus_pkt->isError()) {
703                     pkt->makeAtomicResponse();
704                     pkt->copyError(bus_pkt);
705                 } else if (bus_pkt->isRead() ||
706                            bus_pkt->cmd == MemCmd::UpgradeResp) {
707                     // we're updating cache state to allow us to
708                     // satisfy the upstream request from the cache
709                     blk = handleFill(bus_pkt, blk, writebacks);
710                     satisfyCpuSideRequest(pkt, blk);
712                     // we're satisfying the upstream request without
713                     // modifying cache state, e.g., a write-through
714                     pkt->makeAtomicResponse();
721     // Note that we don't invoke the prefetcher at all in atomic mode.
722     // It's not clear how to do it properly, particularly for
723     // prefetchers that aggressively generate prefetch candidates and
724     // rely on bandwidth contention to throttle them; these will tend
725     // to pollute the cache in atomic mode since there is no bandwidth
726     // contention.  If we ever do want to enable prefetching in atomic
727     // mode, though, this is the place to do it... see timingAccess()
728     // for an example (though we'd want to issue the prefetch(es)
729     // immediately rather than calling requestMemSideBus() as we do
732     // Handle writebacks if needed
733     while (!writebacks.empty()){
734         PacketPtr wbPkt = writebacks.front();
735         memSidePort->sendAtomic(wbPkt);
736         writebacks.pop_front();
740     // We now have the block one way or another (hit or completed miss)
742     if (pkt->needsResponse()) {
743         pkt->makeAtomicResponse();
// Functional (debug) access: check the block, the MSHR queue, the write
// buffer, and both ports for the requested data. Declares the access done
// only when dirty (owned) data was found here; otherwise keeps propagating
// in the direction indicated by fromCpuSide.
750 template<class TagStore>
752 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
754     Addr blk_addr = blockAlign(pkt->getAddr());
755     BlkType *blk = tags->findBlock(pkt->getAddr());
756     MSHR *mshr = mshrQueue.findMatch(blk_addr);
758     pkt->pushLabel(name());
760     CacheBlkPrintWrapper cbpw(blk);
762     // Note that just because an L2/L3 has valid data doesn't mean an
763     // L1 doesn't have a more up-to-date modified copy that still
764     // needs to be found.  As a result we always update the request if
765     // we have it, but only declare it satisfied if we are the owner.
767     // see if we have data at all (owned or otherwise)
768     bool have_data = blk && blk->isValid()
769         && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
771     // data we have is dirty if marked as such or if valid & ownership
772     // pending due to outstanding UpgradeReq
774         have_data && (blk->isDirty() ||
775                       (mshr && mshr->inService && mshr->isPendingDirty()));
// also probe queued packets and both ports; short-circuits once satisfied
777     bool done = have_dirty
778         || cpuSidePort->checkFunctional(pkt)
779         || mshrQueue.checkFunctional(pkt, blk_addr)
780         || writeBuffer.checkFunctional(pkt, blk_addr)
781         || memSidePort->checkFunctional(pkt);
783     DPRINTF(Cache, "functional %s %x %s%s%s\n",
784             pkt->cmdString(), pkt->getAddr(),
785             (blk && blk->isValid()) ? "valid " : "",
786             have_data ? "data " : "", done ? "done " : "");
788     // We're leaving the cache, so pop cache->name() label
794         // if it came as a request from the CPU side then make sure it
795         // continues towards the memory side
797             memSidePort->sendFunctional(pkt);
798         } else if (forwardSnoops && cpuSidePort->isSnooping()) {
799             // if it came from the memory side, it must be a snoop request
800             // and we should only forward it if we are forwarding snoops
801             cpuSidePort->sendFunctionalSnoop(pkt);
807 /////////////////////////////////////////////////////
809 // Response handling: responses from the memory side
811 /////////////////////////////////////////////////////
// Handle a response from the memory side. Finds the originating MSHR,
// updates miss/uncacheable latency stats, fills the block if this is a
// fill response, then services every target queued on the MSHR (CPU
// requests with critical-word-first timing, prefetcher targets, deferred
// snoops). Finally applies post-invalidate/downgrade state, promotes any
// deferred targets or deallocates the MSHR, drains writebacks, and
// flushes tempBlock if it was used for the fill.
814 template<class TagStore>
816 Cache<TagStore>::handleResponse(PacketPtr pkt)
818     Tick time = curTick() + hitLatency;
819     MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
820     bool is_error = pkt->isError();
824     if (pkt->wasNacked()) {
825         //pkt->reinitFromRequest();
826         warn("NACKs from devices not connected to the same bus "
827              "not implemented\n");
831         DPRINTF(Cache, "Cache received packet with error for address %x, "
832                 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
835     DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
837     MSHRQueue *mq = mshr->queue;
838     bool wasFull = mq->isFull();
840     if (mshr == noTargetMSHR) {
841         // we always clear at least one target
842         clearBlocked(Blocked_NoTargets);
846     // Initial target is used just for stats
847     MSHR::Target *initial_tgt = mshr->getTarget();
848     BlkType *blk = tags->findBlock(pkt->getAddr());
849     int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
850     Tick miss_latency = curTick() - initial_tgt->recvTime;
851     PacketList writebacks;
853     if (pkt->req->isUncacheable()) {
854         assert(pkt->req->masterId() < system->maxMasters());
855         mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
858         assert(pkt->req->masterId() < system->maxMasters());
859         mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
// a fill is any non-forwarded read/upgrade response
863     bool is_fill = !mshr->isForward &&
864         (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
866     if (is_fill && !is_error) {
867         DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
870         // give mshr a chance to do some dirty work
871         mshr->handleFill(pkt, blk);
873         blk = handleFill(pkt, blk, writebacks);
877     // First offset for critical word first calculations
878     int initial_offset = 0;
880     if (mshr->hasTargets()) {
881         initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
// service every target queued on this MSHR
884     while (mshr->hasTargets()) {
885         MSHR::Target *target = mshr->getTarget();
887         switch (target->source) {
888           case MSHR::Target::FromCPU:
889             Tick completion_time;
891                 satisfyCpuSideRequest(target->pkt, blk,
892                                       true, mshr->hasPostDowngrade());
893                 // How many bytes past the first request is this one
894                 int transfer_offset =
895                     target->pkt->getOffset(blkSize) - initial_offset;
896                 if (transfer_offset < 0) {
897                     transfer_offset += blkSize;
900                 // If critical word (no offset) return first word time
901                 completion_time = tags->getHitLatency() +
902                     (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
904                 assert(!target->pkt->req->isUncacheable());
906                 assert(target->pkt->req->masterId() < system->maxMasters());
907                 missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] +=
908                     completion_time - target->recvTime;
909             } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
910                 // failed StoreCond upgrade
911                 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
912                        target->pkt->cmd == MemCmd::StoreCondFailReq ||
913                        target->pkt->cmd == MemCmd::SCUpgradeFailReq);
914                 completion_time = tags->getHitLatency() + pkt->finishTime;
// extra data of 0 signals SC failure to the requester
915                 target->pkt->req->setExtraData(0);
917                 // not a cache fill, just forwarding response
918                 completion_time = tags->getHitLatency() + pkt->finishTime;
919                 if (pkt->isRead() && !is_error) {
920                     target->pkt->setData(pkt->getPtr<uint8_t>());
923             target->pkt->makeTimingResponse();
924             // if this packet is an error copy that to the new packet
926                 target->pkt->copyError(pkt);
927             if (target->pkt->cmd == MemCmd::ReadResp &&
928                 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
929                 // If intermediate cache got ReadRespWithInvalidate,
930                 // propagate that.  Response should not have
931                 // isInvalidate() set otherwise.
932                 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
934             cpuSidePort->respond(target->pkt, completion_time);
937           case MSHR::Target::FromPrefetcher:
938             assert(target->pkt->cmd == MemCmd::HardPFReq);
940                 blk->status |= BlkHWPrefetched;
941             delete target->pkt->req;
945           case MSHR::Target::FromSnoop:
946             // I don't believe that a snoop can be in an error state
948             // response to snoop request
949             DPRINTF(Cache, "processing deferred snoop...\n");
950             assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
951             handleSnoop(target->pkt, blk, true, true,
952                         mshr->hasPostInvalidate());
956             panic("Illegal target->source enum %d\n", target->source);
// apply coherence state deferred while the MSHR was in service
963     if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
964         tags->invalidateBlk(blk);
965     } else if (mshr->hasPostDowngrade()) {
966         blk->status &= ~BlkWritable;
970     if (mshr->promoteDeferredTargets()) {
971         // avoid later read getting stale data while write miss is
972         // outstanding.. see comment in timingAccess()
974             blk->status &= ~BlkReadable;
976         MSHRQueue *mq = mshr->queue;
977         mq->markPending(mshr);
978         requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
980         mq->deallocate(mshr);
981         if (wasFull && !mq->isFull()) {
982             clearBlocked((BlockedCause)mq->index);
986     // copy writebacks to write buffer
987     while (!writebacks.empty()) {
988         PacketPtr wbPkt = writebacks.front();
989         allocateWriteBuffer(wbPkt, time, true);
990         writebacks.pop_front();
992     // if we used temp block, clear it out
993     if (blk == tempBlock) {
994         if (blk->isDirty()) {
995             allocateWriteBuffer(writebackBlk(blk), time, true);
997         blk->status &= ~BlkValid;
998         tags->invalidateBlk(blk);
// Build a Writeback packet for a dirty block: regenerate its address from
// tag/set, copy out the data, carry exclusivity down if the block was
// writable, and clear the dirty bit (the writeback now owns the data).
1007 template<class TagStore>
1009 Cache<TagStore>::writebackBlk(BlkType *blk)
1011     assert(blk && blk->isValid() && blk->isDirty());
1013     writebacks[Request::wbMasterId]++;
1015     Request *writebackReq =
1016         new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1017                 Request::wbMasterId);
1018     PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback);
1019     if (blk->isWritable()) {
1020         writeback->setSupplyExclusive();
1022     writeback->allocate();
1023     std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1025     blk->status &= ~BlkDirty;
// Pick a victim block for addr. If the victim has an outstanding upgrade
// MSHR (transient state), allocation fails (caller falls back, e.g. to
// tempBlock). A dirty victim's writeback packet is appended to the
// caller-supplied writebacks list rather than sent directly.
1030 template<class TagStore>
1031 typename Cache<TagStore>::BlkType*
1032 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1034     BlkType *blk = tags->findVictim(addr, writebacks);
1036     if (blk->isValid()) {
1037         Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1038         MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1040             // must be an outstanding upgrade request on block
1041             // we're about to replace...
1042             assert(!blk->isWritable());
1043             assert(repl_mshr->needsExclusive());
1044             // too hard to replace block with transient state
1045             // allocation failed, block not inserted
1048             DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1050                     blk->isDirty() ? "writeback" : "clean");
1052             if (blk->isDirty()) {
1053                 // Save writeback packet for handling by caller
1054                 writebacks.push_back(writebackBlk(blk));
1063 // Note that the reason we return a list of writebacks rather than
1064 // inserting them directly in the write buffer is that this function
1065 // is called by both atomic and timing-mode accesses, and in atomic
1066 // mode we don't mess with the write buffer (we just perform the
1067 // writebacks atomically once the original request is complete).
// Install response data into the cache: allocate (or fall back to
// tempBlock), set valid/readable — plus writable/dirty depending on the
// shared and mem-inhibit signals — copy in read data, and record when the
// block becomes usable. Returns the (possibly re-pointed) block.
1068 template<class TagStore>
1069 typename Cache<TagStore>::BlkType*
1070 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1071                             PacketList &writebacks)
1073     Addr addr = pkt->getAddr();
1075     CacheBlk::State old_state = blk ? blk->status : 0;
1079         // better have read new data...
1080         assert(pkt->hasData());
1081         // need to do a replacement
1082         blk = allocateBlock(addr, writebacks);
1084             // No replaceable block... just use temporary storage to
1085             // complete the current request and then get rid of it
1086             assert(!tempBlock->isValid());
1088             tempBlock->set = tags->extractSet(addr);
1089             tempBlock->tag = tags->extractTag(addr);
1090             DPRINTF(Cache, "using temp block for %x\n", addr);
1092             int id = pkt->req->masterId();
1093             tags->insertBlock(pkt->getAddr(), blk, id);
1096         // starting from scratch with a new block
1099         // existing block... probably an upgrade
1100         assert(blk->tag == tags->extractTag(addr));
1101         // either we're getting new data or the block should already be valid
1102         assert(pkt->hasData() || blk->isValid());
1103         // don't clear block status... if block is already dirty we
1104         // don't want to lose that
1107     blk->status |= BlkValid | BlkReadable;
// nobody asserted shared: we get an exclusive (writable) copy
1109     if (!pkt->sharedAsserted()) {
1110         blk->status |= BlkWritable;
1111         // If we got this via cache-to-cache transfer (i.e., from a
1112         // cache that was an owner) and took away that owner's copy,
1113         // then we need to write it back.  Normally this happens
1114         // anyway as a side effect of getting a copy to write it, but
1115         // there are cases (such as failed store conditionals or
1116         // compare-and-swaps) where we'll demand an exclusive copy but
1117         // end up not writing it.
1118         if (pkt->memInhibitAsserted())
1119             blk->status |= BlkDirty;
1122     DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1123             addr, old_state, blk->status);
1125     // if we got new data, copy it in
1126     if (pkt->isRead()) {
1127         std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1130     blk->whenReady = pkt->finishTime;
1136 /////////////////////////////////////////////////////
1138 // Snoop path: requests coming in from the memory side
1140 /////////////////////////////////////////////////////
// Send a timing-mode response to a snoop we are supplying data for.
// Copies the block data into a (possibly fresh) response packet; when a
// deferred invalidation is pending, converts ReadResp to
// ReadRespWithInvalidate so the requester invalidates immediately.
1142 template<class TagStore>
1145 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1146                        bool already_copied, bool pending_inval)
1148     // timing-mode snoop responses require a new packet, unless we
1149     // already made a copy...
1150     PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1151     assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1153     pkt->makeTimingResponse();
1154     if (pkt->isRead()) {
1155         pkt->setDataFromBlock(blk_data, blkSize);
1157     if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1158         // Assume we defer a response to a read from a far-away cache
1159         // A, then later defer a ReadExcl from a cache B on the same
1160         // bus as us.  We'll assert MemInhibit in both cases, but in
1161         // the latter case MemInhibit will keep the invalidation from
1162         // reaching cache A.  This special response tells cache A that
1163         // it gets the block to satisfy its read, but must immediately
1165         pkt->cmd = MemCmd::ReadRespWithInvalidate;
// snoop responses are charged the hit latency
1167     memSidePort->respond(pkt, curTick() + hitLatency);
// handleSnoop: core snoop processing against block blk for both timing
// and atomic modes. May forward the snoop to upper-level caches, respond
// with data when we hold a dirty copy, and downgrade or invalidate our
// copy according to the snoop type.
//   pkt          - the snoop request (may be mutated: inhibit/shared flags,
//                  or turned into an atomic response)
//   blk          - the matching block, or NULL if we have no copy
//   is_timing    - timing vs. atomic mode
//   is_deferred  - snoop was deferred on an MSHR (timing mode only)
// NOTE(review): this view elides some lines (braces, else branches, the
// pending_inval parameter declaration); comments cover visible code only.
1170 template<class TagStore>
1172 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1173 bool is_timing, bool is_deferred,
1176 // deferred snoops can only happen in timing mode
1177 assert(!(is_deferred && !is_timing));
1178 // pending_inval only makes sense on deferred snoops
1179 assert(!(pending_inval && !is_deferred));
1180 assert(pkt->isRequest());
1182 // the packet may get modified if we or a forwarded snooper
1183 // responds in atomic mode, so remember a few things about the
1184 // original packet up front
1185 bool invalidate = pkt->isInvalidate();
1186 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1188 if (forwardSnoops) {
1189 // first propagate snoop upward to see if anyone above us wants to
1190 // handle it. save & restore packet src since it will get
1191 // rewritten to be relative to cpu-side bus (if any)
1192 bool alreadyResponded = pkt->memInhibitAsserted();
// Timing path (presumably guarded by is_timing on an elided line):
// snoop upward with a flag-cleared copy so upper-cache responses are
// funneled back through the ForwardResponseRecord.
1194 Packet snoopPkt(pkt, true); // clear flags
1195 snoopPkt.setExpressSnoop();
1196 snoopPkt.senderState = new ForwardResponseRecord(pkt, this);
1197 cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1198 if (snoopPkt.memInhibitAsserted()) {
1199 // cache-to-cache response from some upper cache
1200 assert(!alreadyResponded);
1201 pkt->assertMemInhibit();
// No upper cache responded: the ForwardResponseRecord is unused, free it.
1203 delete snoopPkt.senderState;
1205 if (snoopPkt.sharedAsserted()) {
1206 pkt->assertShared();
// Atomic path (else branch of an elided conditional): snoop upward
// synchronously with the original packet.
1209 cpuSidePort->sendAtomicSnoop(pkt);
1210 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1211 // cache-to-cache response from some upper cache:
1212 // forward response to original requester
1213 assert(pkt->isResponse());
// No valid copy here: nothing further to do (early return on elided line).
1218 if (!blk || !blk->isValid()) {
1222 // we may end up modifying both the block state and the packet (if
1223 // we respond in atomic mode), so just figure out what to do now
1224 // and then do it later
1225 bool respond = blk->isDirty() && pkt->needsResponse();
1226 bool have_exclusive = blk->isWritable();
1228 if (pkt->isRead() && !invalidate) {
// Plain read snoop: we keep a copy but must downgrade to shared.
1229 assert(!needs_exclusive);
1230 pkt->assertShared();
1231 int bits_to_clear = BlkWritable;
1232 const bool haveOwnershipState = true; // for now
1233 if (!haveOwnershipState) {
1234 // if we don't support pure ownership (dirty && !writable),
1235 // have to clear dirty bit here, assume memory snarfs data
1236 // on cache-to-cache xfer
1237 bits_to_clear |= BlkDirty;
1239 blk->status &= ~bits_to_clear;
1242 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1243 pkt->cmdString(), blockAlign(pkt->getAddr()),
1244 respond ? "responding, " : "", invalidate ? 0 : blk->status);
// We own dirty data: inhibit memory and supply the block ourselves
// (presumably inside an `if (respond)` on an elided line).
1247 assert(!pkt->memInhibitAsserted());
1248 pkt->assertMemInhibit();
1249 if (have_exclusive) {
1250 pkt->setSupplyExclusive();
1253 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
// Atomic response: convert the request packet in place.
1255 pkt->makeAtomicResponse();
1256 pkt->setDataFromBlock(blk->data, blkSize);
1258 } else if (is_timing && is_deferred) {
1259 // if it's a deferred timing snoop then we've made a copy of
1260 // the packet, and so if we're not using that copy to respond
1261 // then we need to delete it here.
1265 // Do this last in case it deallocates block data or something
// Invalidating snoop: drop our copy entirely (presumably guarded by
// `invalidate` on an elided line).
1268 tags->invalidateBlk(blk);
// snoopTiming: entry point for snoops arriving on the memory side in
// timing mode. Checks for uncacheable/writeback early-outs, lets a
// matching in-service MSHR defer the snoop, services snoops that hit in
// the writeback buffer directly, and finally runs the regular block
// snoop via handleSnoop().
// NOTE(review): several lines (early returns, braces) are elided in this
// view; comments cover visible code only.
1273 template<class TagStore>
1275 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1277 // Note that some deferred snoops don't have requests, since the
1278 // original access may have already completed
1279 if ((pkt->req && pkt->req->isUncacheable()) ||
1280 pkt->cmd == MemCmd::Writeback) {
1281 //Can't get a hit on an uncacheable address
1282 //Revisit this for multi level coherence
1286 BlkType *blk = tags->findBlock(pkt->getAddr());
1288 Addr blk_addr = blockAlign(pkt->getAddr());
1289 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1291 // Let the MSHR itself track the snoop and decide whether we want
1292 // to go ahead and do the regular cache snoop
1293 if (mshr && mshr->handleSnoop(pkt, order++)) {
1294 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
// Deferred snoops may overflow the MSHR's target list; flagged but
// tolerated for now.
1296 if (mshr->getNumTargets() > numTarget)
1297 warn("allocating bonus target for snoop"); //handle later
1301 //We also need to check the writeback buffers and handle those
1302 std::vector<MSHR *> writebacks;
1303 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1304 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1307 //Look through writebacks for any non-uncachable writes, use that
1308 if (writebacks.size()) {
1309 // We should only ever find a single match
1310 assert(writebacks.size() == 1);
1311 mshr = writebacks[0];
1312 assert(!mshr->isUncacheable());
1313 assert(mshr->getNumTargets() == 1);
1314 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1315 assert(wb_pkt->cmd == MemCmd::Writeback);
// A pending writeback means we hold the only up-to-date copy: inhibit
// memory and supply the data from the writeback packet.
1317 assert(!pkt->memInhibitAsserted());
1318 pkt->assertMemInhibit();
1319 if (!pkt->needsExclusive()) {
1320 pkt->assertShared();
1321 // the writeback is no longer the exclusive copy in the system
1322 wb_pkt->clearSupplyExclusive();
1324 // if we're not asserting the shared line, we need to
1325 // invalidate our copy. we'll do that below as long as
1326 // the packet's invalidate flag is set...
1327 assert(pkt->isInvalidate());
1329 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1332 if (pkt->isInvalidate()) {
1333 // Invalidation trumps our writeback... discard here
1334 markInService(mshr);
1337 } // writebacks.size()
1340 // If this was a shared writeback, there may still be
1341 // other shared copies above that require invalidation.
1342 // We could be more selective and return here if the
1343 // request is non-exclusive or if the writeback is
// Regular snoop on the tag-store block (not deferred, no pending inval).
1345 handleSnoop(pkt, blk, true, false, false);
// recvTimingSnoopResp: an express snoop response travelling downward
// (master to slave, e.g. L1 to L2) re-enters the cache through the
// normal timing-access path.
1348 template<class TagStore>
1350 Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
1352 // Express snoop responses from master to slave, e.g., from L1 to L2
1353 cache->timingAccess(pkt);
// snoopAtomic: atomic-mode snoop entry point. Early-outs for
// uncacheable addresses and writebacks, then runs the common snoop
// logic (handleSnoop with is_timing = false). Presumably returns the
// snoop latency; the return statements are elided in this view.
1357 template<class TagStore>
1359 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1361 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1362 // Can't get a hit on an uncacheable address
1363 // Revisit this for multi level coherence
1367 BlkType *blk = tags->findBlock(pkt->getAddr());
1368 handleSnoop(pkt, blk, false, false, false);
// getNextMSHR: pick the next MSHR to service, arbitrating between the
// miss queue and the write buffer. Misses are favored unless the write
// buffer is full; ordering conflicts between the two queues are resolved
// by servicing the older conflicting entry first. If both queues are
// empty, optionally generate a hardware prefetch.
// NOTE(review): returns and closing braces are elided in this view.
1373 template<class TagStore>
1375 Cache<TagStore>::getNextMSHR()
1377 // Check both MSHR queue and write buffer for potential requests
1378 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1379 MSHR *write_mshr = writeBuffer.getNextMSHR();
1381 // Now figure out which one to send... some cases are easy
1382 if (miss_mshr && !write_mshr) {
1385 if (write_mshr && !miss_mshr) {
1389 if (miss_mshr && write_mshr) {
1390 // We have one of each... normally we favor the miss request
1391 // unless the write buffer is full
1392 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1393 // Write buffer is full, so we'd like to issue a write;
1394 // need to search MSHR queue for conflicting earlier miss.
1395 MSHR *conflict_mshr =
1396 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1398 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1399 // Service misses in order until conflict is cleared.
1400 return conflict_mshr;
1403 // No conflicts; issue write
1407 // Write buffer isn't full, but need to check it for
1408 // conflicting earlier writeback
1409 MSHR *conflict_mshr =
1410 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1411 if (conflict_mshr) {
1412 // not sure why we don't check order here... it was in the
1413 // original code but commented out.
1415 // The only way this happens is if we are
1416 // doing a write and we didn't have permissions
1417 // then subsequently saw a writeback (owned got evicted)
1418 // We need to make sure to perform the writeback first
1419 // To preserve the dirty data, then we can issue the write
1421 // should we return write_mshr here instead? I.e. do we
1422 // have to flush writes in order? I don't think so... not
1423 // for Alpha anyway. Maybe for x86?
1424 return conflict_mshr;
1427 // No conflicts; issue read
1431 // fall through... no pending requests. Try a prefetch.
1432 assert(!miss_mshr && !write_mshr);
1433 if (prefetcher && !mshrQueue.isFull()) {
1434 // If we have a miss queue slot, we can try a prefetch
1435 PacketPtr pkt = prefetcher->getPacket();
// Only issue the prefetch if the line is not already present, being
// fetched, or being written back.
1437 Addr pf_addr = blockAlign(pkt->getAddr());
1438 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1439 !writeBuffer.findMatch(pf_addr)) {
1440 // Update statistic on number of prefetches issued
1441 // (hwpf_mshr_misses)
1442 assert(pkt->req->masterId() < system->maxMasters());
1443 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1444 // Don't request bus, since we already have it
1445 return allocateMissBuffer(pkt, curTick(), false);
// Redundant prefetch: discard it.
1447 // free the request and packet
// getTimingPacket: build the next packet to send on the memory-side port
// from the next ready MSHR. Handles three cases: (1) queued SC/StoreCond
// requests that have already failed are answered locally; (2) forward-
// no-response MSHRs send the target packet as-is; (3) everything else
// gets a proper bus packet via getBusPacket(). Hardware prefetches are
// first snooped upward to avoid fetching a stale copy from memory.
// NOTE(review): returns and some braces are elided in this view.
1458 template<class TagStore>
1460 Cache<TagStore>::getTimingPacket()
1462 MSHR *mshr = getNextMSHR();
1468 // use request from 1st target
1469 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1470 PacketPtr pkt = NULL;
1472 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1473 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1474 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1475 // in MSHR, so now that we are getting around to processing
1476 // it, just treat it as if we got a failure response
1477 pkt = new Packet(tgt_pkt);
1478 pkt->cmd = MemCmd::UpgradeFailResp;
1479 pkt->senderState = mshr;
1480 pkt->firstWordTime = pkt->finishTime = curTick();
// Feed the synthesized failure response straight back into ourselves.
1481 handleResponse(pkt);
1483 } else if (mshr->isForwardNoResponse()) {
1484 // no response expected, just forward packet as it is
1485 assert(tags->findBlock(mshr->addr) == NULL);
1488 BlkType *blk = tags->findBlock(mshr->addr);
1490 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1491 // It might be possible for a writeback to arrive between
1492 // the time the prefetch is placed in the MSHRs and when
1493 // it's selected to send... if so, this assert will catch
1494 // that, and then we'll have to figure out what to do.
1495 assert(blk == NULL);
1497 // We need to check the caches above us to verify that they don't have
1498 // a copy of this block in the dirty state at the moment. Without this
1499 // check we could get a stale copy from memory that might get used
1500 // in place of the dirty one.
1501 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1502 snoop_pkt->setExpressSnoop();
1503 snoop_pkt->senderState = mshr;
1504 cpuSidePort->sendTimingSnoopReq(snoop_pkt);
1506 if (snoop_pkt->memInhibitAsserted()) {
// An upper cache owns a dirty copy and will respond; the prefetch
// becomes a no-op (marked in service without going to memory).
1507 markInService(mshr, snoop_pkt);
1508 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1509 tgt_pkt->getAddr());
1516 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
// getBusPacket() returns NULL for non-block requests; in that case we
// forward a copy of the target packet instead.
1518 mshr->isForward = (pkt == NULL);
1520 if (mshr->isForward) {
1521 // not a cache block request, but a response is expected
1522 // make copy of current packet to forward, keep current
1523 // copy for response handling
1524 pkt = new Packet(tgt_pkt);
1526 if (pkt->isWrite()) {
1527 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1532 assert(pkt != NULL);
1533 pkt->senderState = mshr;
// nextMSHRReadyTime: earliest tick at which any queued work (miss queue,
// write buffer, or prefetcher) becomes ready to send.
1538 template<class TagStore>
1540 Cache<TagStore>::nextMSHRReadyTime()
1542 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1543 writeBuffer.nextMSHRReadyTime());
// Also consider the prefetcher, if present (guard condition presumably
// on an elided line).
1546 nextReady = std::min(nextReady,
1547 prefetcher->nextPrefetchReadyTime());
// serialize: checkpointing with caches is unsupported — dirty lines are
// never written back to memory, so a checkpoint taken here is lossy.
// Record a bad_checkpoint flag so unserialize() can refuse to restore.
1553 template<class TagStore>
1555 Cache<TagStore>::serialize(std::ostream &os)
1557 warn("*** Creating checkpoints with caches is not supported. ***\n");
1558 warn(" Remove any caches before taking checkpoints\n");
1559 warn(" This checkpoint will not restore correctly and dirty data in "
1560 "the cache will be lost!\n");
1562 // Since we don't write back the data dirty in the caches to the physical
1563 // memory if caches exist in the system we won't be able to restore
1564 // from the checkpoint as any data dirty in the caches will be lost.
1566 bool bad_checkpoint = true;
1567 SERIALIZE_SCALAR(bad_checkpoint);
// unserialize: refuse to restore from a checkpoint that was taken with
// caches present (flagged by serialize() above), since dirty data was
// lost at checkpoint time.
1570 template<class TagStore>
1572 Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
1574 bool bad_checkpoint;
1575 UNSERIALIZE_SCALAR(bad_checkpoint);
1576 if (bad_checkpoint) {
1577 fatal("Restoring from checkpoints with caches is not supported in the "
1578 "classic memory system. Please remove any caches before taking "
// getAddrRanges: the CPU-side port advertises the cache's address ranges.
1589 template<class TagStore>
1591 Cache<TagStore>::CpuSidePort::getAddrRanges() const
1593 return cache->getAddrRanges();
// recvTimingReq: CPU-side timing request entry point. If the cache is
// blocked, defer the request by scheduling a retry — except for
// mem-inhibited packets, which must always be allowed through.
1596 template<class TagStore>
1598 Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
1600 // always let inhibited requests through even if blocked
1601 if (!pkt->memInhibitAsserted() && blocked) {
1602 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1603 mustSendRetry = true;
// Not blocked (or inhibited): hand the packet to the cache proper.
1607 cache->timingAccess(pkt);
// recvAtomic: CPU-side atomic access, delegated to the cache; returns
// the access latency.
1611 template<class TagStore>
1613 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1616 return cache->atomicAccess(pkt);
// recvFunctional: CPU-side functional access; `true` marks the packet as
// arriving from the CPU side.
1619 template<class TagStore>
1621 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1623 // functional request
1624 cache->functionalAccess(pkt, true);
// CpuSidePort constructor: wires up the base slave port and keeps a
// back-pointer to the owning cache.
1627 template<class TagStore>
1629 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1630 const std::string &_label)
1631 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
// recvTimingResp: timing response from memory side. Nacked packets are
// not yet resendable (known limitation — panic), otherwise the response
// is handed to the cache's MSHR handling.
1641 template<class TagStore>
1643 Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
1645 // this needs to be fixed so that the cache updates the mshr and sends the
1646 // packet back out on the link, but it probably won't happen so until this
1647 // gets fixed, just panic when it does
1648 if (pkt->wasNacked())
1649 panic("Need to implement cache resending nacked packets!\n");
1651 cache->handleResponse(pkt);
1655 // Express snooping requests to memside port
// recvTimingSnoopReq: snoop request arriving on the memory side in
// timing mode; delegated to the cache's snoop path.
1656 template<class TagStore>
1658 Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
1660 // handle snooping requests
1661 cache->snoopTiming(pkt);
// recvAtomicSnoop: atomic-mode snoop delegated to the cache; returns the
// snoop latency.
1664 template<class TagStore>
1666 Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
1669 return cache->snoopAtomic(pkt);
// recvFunctionalSnoop: functional snoop; `false` marks the packet as
// arriving from the memory side.
1672 template<class TagStore>
1674 Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
1676 // functional snoop (note that in contrast to atomic we don't have
1677 // a specific functionalSnoop method, as they have the same
1678 // behaviour regardless)
1679 cache->functionalAccess(pkt, false);
// sendDeferredPacket: drain logic for the memory-side packet queue.
// Responses already queued take priority; otherwise ask the cache for
// the next request/writeback packet and try to send it, tracking retry
// state. On success the packet's MSHR is marked in service; on retry the
// speculative packet may be deleted and rebuilt later.
// NOTE(review): some braces/else lines are elided in this view.
1682 template<class TagStore>
1684 Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
1686 // if we have a response packet waiting we have to start with that
1687 if (deferredPacketReady()) {
1688 // use the normal approach from the timing port
1691 // check for request packets (requests & writebacks)
1692 PacketPtr pkt = cache.getTimingPacket();
// NULL packet (presumably checked on an elided line):
1694 // can happen if e.g. we attempt a writeback and fail, but
1695 // before the retry, the writeback is eliminated because
1696 // we snoop another cache's ReadEx.
1697 waitingOnRetry = false;
// Recover the owning MSHR stashed in senderState by getTimingPacket().
1699 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1701 waitingOnRetry = !masterPort.sendTimingReq(pkt);
1703 if (waitingOnRetry) {
1704 DPRINTF(CachePort, "now waiting on a retry\n");
1705 if (!mshr->isForwardNoResponse()) {
1706 // we are awaiting a retry, but we
1707 // delete the packet and will be creating a new packet
1708 // when we get the opportunity
1711 // note that we have now masked any requestBus and
1712 // schedSendEvent (we will wait for a retry before
1713 // doing anything), and this is so even if we do not
1714 // care about this packet and might override it before
// Send succeeded: mark the MSHR in service.
1717 cache.markInService(mshr, pkt);
1722 // if we succeeded and are not waiting for a retry, schedule the
1723 // next send, not only looking at the response transmit list, but
1724 // also considering when the next MSHR is ready
1725 if (!waitingOnRetry) {
1726 scheduleSend(cache.nextMSHRReadyTime());
1730 template<class TagStore>
1732 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1733 const std::string &_label)
1734 : BaseCache::CacheMasterPort(_name, _cache, _queue),
1735 _queue(*_cache, *this, _label), cache(_cache)