2 * Copyright (c) 2010-2015 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * Authors: Erik Hallnor
49 #ifndef __MEM_CACHE_CACHE_IMPL_HH__
50 #define __MEM_CACHE_CACHE_IMPL_HH__
57 #include "base/misc.hh"
58 #include "base/types.hh"
59 #include "debug/Cache.hh"
60 #include "debug/CachePort.hh"
61 #include "debug/CacheTags.hh"
62 #include "mem/cache/prefetch/base.hh"
63 #include "mem/cache/blk.hh"
64 #include "mem/cache/cache.hh"
65 #include "mem/cache/mshr.hh"
66 #include "sim/sim_exit.hh"
68 template<class TagStore>
// Constructor: capture the tag store and prefetcher from the parameter
// struct, allocate the temporary block (tempBlock) used for fills that
// bypass the tag store, and build the two ports.
// NOTE(review): interior lines of this definition are elided in this view
// (initializer-list continuation and port constructor arguments) — the
// comments below describe only what is visible here.
69 Cache<TagStore>::Cache(const Params *p)
71 tags(dynamic_cast<TagStore*>(p->tags)),
72 prefetcher(p->prefetcher),
74 prefetchOnAccess(p->prefetch_on_access)
// tempBlock is a stand-in block (never inserted into the tag store; see
// the "blk != tempBlock" checks before tags->invalidate() elsewhere).
76 tempBlock = new BlkType();
77 tempBlock->data = new uint8_t[blkSize];
// create the CPU-facing (upstream) and memory-facing (downstream) ports
79 cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
81 memSidePort = new MemSidePort(p->name + ".mem_side", this,
// let the prefetcher know which cache it belongs to
86 prefetcher->setCache(this);
89 template<class TagStore>
// Destructor: release the data buffer allocated for tempBlock in the
// constructor (tempBlock itself is also owned by this cache).
90 Cache<TagStore>::~Cache()
92 delete [] tempBlock->data;
99 template<class TagStore>
// Register statistics; all stats are declared in the base class, so this
// simply delegates to BaseCache.
101 Cache<TagStore>::regStats()
103 BaseCache::regStats();
106 template<class TagStore>
// Perform an atomic swap (SwapReq) on the block's data: the packet's
// write value is exchanged with the current memory contents. If the
// request is a conditional swap, the write only happens when the current
// contents match the condition value carried in the request's extra data.
// Supports 32-bit and 64-bit operand sizes only.
108 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
110 assert(pkt->isRequest());
112 uint64_t overwrite_val;
114 uint64_t condition_val64;
115 uint32_t condition_val32;
// locate the accessed bytes within the cache block
117 int offset = tags->extractBlkOffset(pkt->getAddr());
118 uint8_t *blk_data = blk->data + offset;
// operand must fit in the 64-bit scratch value
120 assert(sizeof(uint64_t) >= pkt->getSize());
122 overwrite_mem = true;
123 // keep a copy of our possible write value, and copy what is at the
124 // memory address into the packet
125 pkt->writeData((uint8_t *)&overwrite_val);
126 pkt->setData(blk_data);
128 if (pkt->req->isCondSwap()) {
129 if (pkt->getSize() == sizeof(uint64_t)) {
130 condition_val64 = pkt->req->getExtraData();
// only write if the current contents equal the condition value
131 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
133 } else if (pkt->getSize() == sizeof(uint32_t)) {
134 condition_val32 = (uint32_t)pkt->req->getExtraData();
135 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
138 panic("Invalid size for conditional read/write\n");
// commit the swap value and mark the block dirty
142 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
143 blk->status |= BlkDirty;
148 template<class TagStore>
// Satisfy a CPU-side request out of the given (valid) block: dispatch on
// the request type (swap, write, read, upgrade/invalidate), update block
// state (dirty/invalid) and, for whole-line reads from upper-level
// caches, decide whether to hand out an exclusive and/or owned copy via
// the mem-inhibit / shared signalling visible in this code.
150 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
151 bool deferred_response,
152 bool pending_downgrade)
154 assert(pkt->isRequest());
156 assert(blk && blk->isValid());
157 // Occasionally this is not true... if we are a lower-level cache
158 // satisfying a string of Read and ReadEx requests from
159 // upper-level caches, a Read will mark the block as shared but we
160 // can satisfy a following ReadEx anyway since we can rely on the
161 // Read requester(s) to have buffered the ReadEx snoop and to
162 // invalidate their blocks after receiving them.
163 // assert(!pkt->needsExclusive() || blk->isWritable());
164 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
166 // Check RMW operations first since both isRead() and
167 // isWrite() will be true for them
168 if (pkt->cmd == MemCmd::SwapReq) {
169 cmpAndSwap(blk, pkt);
170 } else if (pkt->isWrite() &&
171 (!pkt->isWriteInvalidate() || isTopLevel)) {
172 assert(blk->isWritable());
173 // Write or WriteInvalidate at the first cache with block in Exclusive
174 if (blk->checkWrite(pkt)) {
175 pkt->writeDataToBlock(blk->data, blkSize);
177 // Always mark the line as dirty even if we are a failed
178 // StoreCond so we supply data to any snoops that have
179 // appended themselves to this cache before knowing the store
181 blk->status |= BlkDirty;
182 DPRINTF(Cache, "%s for %s address %x size %d (write)\n", __func__,
183 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
184 } else if (pkt->isRead()) {
186 blk->trackLoadLocked(pkt);
188 pkt->setDataFromBlock(blk->data, blkSize);
189 if (pkt->getSize() == blkSize) {
190 // special handling for coherent block requests from
191 // upper-level caches
192 if (pkt->needsExclusive()) {
193 // if we have a dirty copy, make sure the recipient
194 // keeps it marked dirty
195 if (blk->isDirty()) {
196 pkt->assertMemInhibit();
198 // on ReadExReq we give up our copy unconditionally
199 if (blk != tempBlock)
200 tags->invalidate(blk);
202 } else if (blk->isWritable() && !pending_downgrade
203 && !pkt->sharedAsserted() && !pkt->req->isInstFetch()) {
204 // we can give the requester an exclusive copy (by not
205 // asserting shared line) on a read request if:
206 // - we have an exclusive copy at this level (& below)
207 // - we don't have a pending snoop from below
208 // signaling another read request
209 // - no other cache above has a copy (otherwise it
210 // would have asserted shared line on request)
211 // - we are not satisfying an instruction fetch (this
212 // prevents dirty data in the i-cache)
214 if (blk->isDirty()) {
215 // special considerations if we're owner:
216 if (!deferred_response && !isTopLevel) {
217 // if we are responding immediately and can
218 // signal that we're transferring ownership
219 // along with exclusivity, do so
220 pkt->assertMemInhibit();
221 blk->status &= ~BlkDirty;
223 // if we're responding after our own miss,
224 // there's a window where the recipient didn't
225 // know it was getting ownership and may not
226 // have responded to snoops correctly, so we
227 // can't pass off ownership *or* exclusivity
232 // otherwise only respond with a shared copy
237 // Upgrade or WriteInvalidate at a different cache than received it.
238 // Since we have it Exclusively (E or M), we ack then invalidate.
239 assert(pkt->isUpgrade() ||
240 (pkt->isWriteInvalidate() && !isTopLevel));
241 assert(blk != tempBlock);
242 tags->invalidate(blk);
244 DPRINTF(Cache, "%s for %s address %x size %d (invalidation)\n",
245 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
250 /////////////////////////////////////////////////////
252 // MSHR helper functions
254 /////////////////////////////////////////////////////
257 template<class TagStore>
// Mark an MSHR as in service, and if it was a hardware prefetch request
// with no further prefetches pending, drop our bus request for prefetch.
259 Cache<TagStore>::markInService(MSHR *mshr, bool pending_dirty_resp)
261 markInServiceInternal(mshr, pending_dirty_resp);
263 if (mshr->originalCmd == MemCmd::HardPFReq) {
264 DPRINTF(HWPrefetch, "Marking a HW_PF in service\n");
265 //Also clear pending if need be
266 if (!prefetcher->havePending())
268 deassertMemSideBusRequest(Request_PF);
275 template<class TagStore>
// Squash all outstanding MSHR activity for the given thread, and unblock
// the cache if the squash removed the condition (no-targets MSHR or a
// full MSHR queue) that caused it to block.
277 Cache<TagStore>::squash(int threadNum)
279 bool unblock = false;
280 BlockedCause cause = NUM_BLOCKED_CAUSES;
282 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
285 cause = Blocked_NoTargets;
287 if (mshrQueue.isFull()) {
289 cause = Blocked_NoMSHRs;
291 mshrQueue.squash(threadNum);
// only clear the block if squashing actually freed up space
292 if (unblock && !mshrQueue.isFull()) {
297 /////////////////////////////////////////////////////
299 // Access path: requests coming in from the CPU side
301 /////////////////////////////////////////////////////
303 template<class TagStore>
// Core access path: look the address up in the tag store (updating lat
// by reference), handle Writebacks specially (they may allocate a block
// without owning a copy), satisfy hits in place, and report whether the
// access was satisfied. On a miss, blk/writebacks carry state back to
// the caller for MSHR allocation.
305 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
306 Cycles &lat, PacketList &writebacks)
309 assert(pkt->isRequest());
311 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
312 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
313 if (pkt->req->isUncacheable()) {
// uncacheable accesses flush any matching state out of this cache
314 uncacheableFlush(pkt);
316 // lookupLatency is the latency in case the request is uncacheable.
321 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
322 // Here lat is the value passed as parameter to accessBlock() function
323 // that can modify its value.
324 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);
326 DPRINTF(Cache, "%s%s %x (%s) %s\n", pkt->cmdString(),
327 pkt->req->isInstFetch() ? " (ifetch)" : "",
328 pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
329 blk ? "hit " + blk->print() : "miss");
331 // Writeback handling is special case. We can write the block into
332 // the cache without having a writeable copy (or any copy at all).
333 if (pkt->cmd == MemCmd::Writeback) {
334 assert(blkSize == pkt->getSize());
336 // need to do a replacement
337 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
339 // no replaceable block available: give up, fwd to next level.
343 tags->insertBlock(pkt, blk);
// freshly-inserted writeback data starts valid and readable ...
345 blk->status = (BlkValid | BlkReadable);
346 if (pkt->isSecure()) {
347 blk->status |= BlkSecure;
// ... and dirty, since we now hold the only up-to-date copy
350 blk->status |= BlkDirty;
351 if (pkt->isSupplyExclusive()) {
352 blk->status |= BlkWritable;
354 // nothing else to do; writeback doesn't expect response
355 assert(!pkt->needsResponse());
356 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
357 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
360 } else if ((blk != NULL) &&
361 (pkt->needsExclusive() ? blk->isWritable()
362 : blk->isReadable())) {
363 // OK to satisfy access
365 satisfyCpuSideRequest(pkt, blk);
369 // Can't satisfy access normally... either no block (blk == NULL)
370 // or have block but need exclusive & only have shared.
374 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
375 // complete miss on store conditional... just give up now
376 pkt->req->setExtraData(0);
// Sender-state marker attached to packets forwarded upward, so that the
// matching response can be recognized (via dynamic_cast) when it comes
// back down in recvTimingSnoopResp().
384 class ForwardResponseRecord : public Packet::SenderState
388 ForwardResponseRecord() {}
391 template<class TagStore>
// Handle a timing snoop response arriving on the CPU side: these are
// cache-to-cache responses from an upper level. Without a
// ForwardResponseRecord sender state, the only expected case visible
// here is a hardware prefetch response; otherwise the packet's original
// sender state is restored and the response is forwarded downstream.
393 Cache<TagStore>::recvTimingSnoopResp(PacketPtr pkt)
395 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
396 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
398 assert(pkt->isResponse());
400 // must be cache-to-cache response from upper to lower level
401 ForwardResponseRecord *rec =
402 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
403 assert(!system->bypassCaches());
406 // @todo What guarantee do we have that this HardPFResp is
407 // actually for this cache, and not a cache closer to the
409 assert(pkt->cmd == MemCmd::HardPFResp);
410 // Check if it's a prefetch response and handle it. We shouldn't
411 // get any other kinds of responses without FRRs.
412 DPRINTF(Cache, "Got prefetch response from above for addr %#x (%s)\n",
413 pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
418 pkt->popSenderState();
420 // forwardLatency is set here because there is a response from an
421 // upper level cache.
422 // To pay the delay that occurs if the packet comes from the bus,
423 // we charge also headerDelay.
424 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
425 // Reset the timing of the packet.
426 pkt->headerDelay = pkt->payloadDelay = 0;
427 memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
430 template<class TagStore>
// Promote a full-cache-line, block-aligned WriteReq to WriteInvalidateReq
// (when doFastWrites is enabled), avoiding the need to fetch the old line
// contents before overwriting all of them.
432 Cache<TagStore>::promoteWholeLineWrites(PacketPtr pkt)
434 // Cache line clearing instructions
435 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
436 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
437 pkt->cmd = MemCmd::WriteInvalidateReq;
438 DPRINTF(Cache, "packet promoted from Write to WriteInvalidate\n");
439 assert(isTopLevel); // should only happen at L1 or I/O cache
443 template<class TagStore>
// Main timing-mode request entry point from the CPU side. Handles, in
// order: deferred packet deletion, cache bypass, whole-line write
// promotion, requests already claimed by another cache (mem-inhibit),
// uncacheable accesses, then the normal access path: hits are responded
// to directly, misses coalesce into an existing MSHR or allocate a new
// MSHR / write buffer entry, and any writebacks generated along the way
// are queued. Also drives the prefetcher on cacheable activity.
445 Cache<TagStore>::recvTimingReq(PacketPtr pkt)
447 DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());
448 //@todo Add back in MemDebug Calls
449 // MemDebug::cacheAccess(pkt);
452 /// @todo temporary hack to deal with memory corruption issue until
453 /// 4-phase transactions are complete
454 for (int x = 0; x < pendingDelete.size(); x++)
455 delete pendingDelete[x];
456 pendingDelete.clear();
458 assert(pkt->isRequest());
460 // Just forward the packet if caches are disabled.
461 if (system->bypassCaches()) {
462 // @todo This should really enqueue the packet rather
463 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
468 promoteWholeLineWrites(pkt);
470 if (pkt->memInhibitAsserted()) {
471 // a cache above us (but not where the packet came from) is
472 // responding to the request
473 DPRINTF(Cache, "mem inhibited on 0x%x (%s): not responding\n",
474 pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
475 assert(!pkt->req->isUncacheable());
477 // if the packet needs exclusive, and the cache that has
478 // promised to respond (setting the inhibit flag) is not
479 // providing exclusive (it is in O vs M state), we know that
480 // there may be other shared copies in the system; go out and
481 // invalidate them all
482 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
483 // create a downstream express snoop with cleared packet
484 // flags, there is no need to allocate any data as the
485 // packet is merely used to co-ordinate state transitions
486 Packet *snoop_pkt = new Packet(pkt, true, false);
488 // also reset the bus time that the original packet has
490 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
492 // make this an instantaneous express snoop, and let the
493 // other caches in the system know that the packet is
494 // inhibited, because we have found the authoritative copy
495 // (O) that will supply the right data
496 snoop_pkt->setExpressSnoop();
497 snoop_pkt->assertMemInhibit();
499 // this express snoop travels towards the memory, and at
500 // every crossbar it is snooped upwards thus reaching
501 // every cache in the system
502 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
503 // express snoops always succeed
506 // main memory will delete the packet
509 /// @todo nominally we should just delete the packet here,
510 /// however, until 4-phase stuff we can't because sending
511 /// cache is still relying on it
512 pendingDelete.push_back(pkt);
514 // no need to take any action in this particular cache as the
515 // caches along the path to memory are allowed to keep lines
516 // in a shared state, and a cache above us already committed
521 if (pkt->req->isUncacheable()) {
522 uncacheableFlush(pkt);
524 // writes go in write buffer, reads use MSHR,
525 // prefetches are acknowledged (responded to) and dropped
526 if (pkt->cmd.isPrefetch()) {
527 // prefetching (cache loading) uncacheable data is nonsensical
528 pkt->makeTimingResponse();
529 std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
530 // We use lookupLatency here because the request is uncacheable.
531 // We pay also for headerDelay that is charged of bus latencies if
532 // the packet comes from the bus.
533 Tick time = clockEdge(lookupLatency) + pkt->headerDelay;
534 // Reset the timing of the packet.
535 pkt->headerDelay = pkt->payloadDelay = 0;
536 cpuSidePort->schedTimingResp(pkt, time);
538 } else if (pkt->isWrite() && !pkt->isRead()) {
539 // We pay also for headerDelay that is charged of bus latencies if
540 // the packet comes from the bus.
541 Tick allocate_wr_buffer_time = clockEdge(forwardLatency) +
543 // Reset the timing of the packet.
544 pkt->headerDelay = pkt->payloadDelay = 0;
545 allocateWriteBuffer(pkt, allocate_wr_buffer_time, true);
547 // We use forwardLatency here because there is an uncached
548 // memory read, allocated to MSHR queue (it requires the same
549 // time of forwarding to WriteBuffer, in our assumption). It
550 // specifies the latency to allocate an internal buffer and to
551 // schedule an event to the queued port.
552 // We pay also for headerDelay that is charged of bus latencies if
553 // the packet comes from the bus.
554 Tick allocate_rd_buffer_time = clockEdge(forwardLatency) +
556 // Reset the timing of the packet.
557 pkt->headerDelay = pkt->payloadDelay = 0;
558 allocateUncachedReadBuffer(pkt, allocate_rd_buffer_time, true);
560 assert(pkt->needsResponse()); // else we should delete it here??
564 // We use lookupLatency here because it is used to specify the latency
566 Cycles lat = lookupLatency;
568 PacketList writebacks;
569 // Note that lat is passed by reference here. The function access() calls
570 // accessBlock() which can modify lat value.
571 bool satisfied = access(pkt, blk, lat, writebacks);
572 // Here we charge the headerDelay that takes into account the latencies
573 // of the bus, if the packet comes from it.
574 // The latency charged is just lat, which is the value of lookupLatency
575 // modified by access() function, or if not just lookupLatency.
576 // In case of a hit we are neglecting response latency.
577 // In case of a miss we are neglecting forward latency.
578 Tick request_time = clockEdge(lat) + pkt->headerDelay;
579 // Here we consider forward_time, paying for just forward latency and
580 // also charging the delay provided by the xbar.
581 // forward_time is used in allocateWriteBuffer() function, called
582 // in case of writeback.
583 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
584 // Here we reset the timing of the packet.
585 pkt->headerDelay = pkt->payloadDelay = 0;
587 // track time of availability of next prefetch, if any
588 Tick next_pf_time = MaxTick;
590 bool needsResponse = pkt->needsResponse();
593 // hit (for all other request types)
595 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
// clear the prefetched flag: the block has now been demand-referenced
597 blk->status &= ~BlkHWPrefetched;
599 // Don't notify on SWPrefetch
600 if (!pkt->cmd.isSWPrefetch())
601 next_pf_time = prefetcher->notify(pkt);
605 pkt->makeTimingResponse();
606 // @todo: Make someone pay for this
607 pkt->headerDelay = pkt->payloadDelay = 0;
609 // In this case we are considering request_time that takes
610 // into account the delay of the xbar, if any, and just
611 // lat, neglecting responseLatency, modelling hit latency
612 // just as lookupLatency or the value of lat overridden
613 // by access(), that calls accessBlock() function.
614 cpuSidePort->schedTimingResp(pkt, request_time);
616 /// @todo nominally we should just delete the packet here,
617 /// however, until 4-phase stuff we can't because sending
618 /// cache is still relying on it
619 pendingDelete.push_back(pkt);
624 Addr blk_addr = blockAlign(pkt->getAddr());
625 MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());
627 // Software prefetch handling:
628 // To keep the core from waiting on data it won't look at
629 // anyway, send back a response with dummy data. Miss handling
630 // will continue asynchronously. Unfortunately, the core will
631 // insist upon freeing original Packet/Request, so we have to
632 // create a new pair with a different lifecycle. Note that this
633 // processing happens before any MSHR munging on the behalf of
634 // this request because this new Request will be the one stored
635 // into the MSHRs, not the original.
636 if (pkt->cmd.isSWPrefetch() && isTopLevel) {
637 assert(needsResponse);
638 assert(pkt->req->hasPaddr());
640 // There's no reason to add a prefetch as an additional target
641 // to an existing MSHR. If an outstanding request is already
642 // in progress, there is nothing for the prefetch to do.
643 // If this is the case, we don't even create a request at all.
644 PacketPtr pf = nullptr;
647 // copy the request and create a new SoftPFReq packet
648 RequestPtr req = new Request(pkt->req->getPaddr(),
650 pkt->req->getFlags(),
651 pkt->req->masterId());
652 pf = new Packet(req, pkt->cmd);
654 assert(pf->getAddr() == pkt->getAddr());
655 assert(pf->getSize() == pkt->getSize());
658 pkt->makeTimingResponse();
659 // for debugging, set all the bits in the response data
660 // (also keeps valgrind from complaining when debugging settings
661 // print out instruction results)
662 std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
663 // request_time is used here, taking into account lat and the delay
664 // charged if the packet comes from the xbar.
665 cpuSidePort->schedTimingResp(pkt, request_time);
667 // If an outstanding request is in progress (we found an
668 // MSHR) this is set to null
674 /// @note writebacks will be checked in getNextMSHR()
675 /// for any conflicting requests to the same block
677 //@todo remove hw_pf here
679 // Coalesce unless it was a software prefetch (see above).
681 assert(pkt->req->masterId() < system->maxMasters());
682 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
683 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
684 mshr->threadNum = -1;
686 // We use forward_time here because it is the same
687 // considering new targets. We have multiple requests for the
688 // same address here. It specifies the latency to allocate an
689 // internal buffer and to schedule an event to the queued
690 // port and also takes into account the additional delay of
692 mshr->allocateTarget(pkt, forward_time, order++);
693 if (mshr->getNumTargets() == numTarget) {
// the MSHR is now full: block the cache for new targets
695 setBlocked(Blocked_NoTargets);
696 // need to be careful with this... if this mshr isn't
697 // ready yet (i.e. time > curTick()), we don't want to
698 // move it ahead of mshrs that are ready
699 // mshrQueue.moveToFront(mshr);
702 // We should call the prefetcher regardless if the request is
703 // satisfied or not, regardless if the request is in the MSHR or
704 // not. The request could be a ReadReq hit, but still not
705 // satisfied (potentially because of a prior write to the same
706 // cache line. So, even when not satisfied, there is an MSHR
707 // already allocated for this, we need to let the prefetcher know
710 // Don't notify on SWPrefetch
711 if (!pkt->cmd.isSWPrefetch())
712 next_pf_time = prefetcher->notify(pkt);
717 assert(pkt->req->masterId() < system->maxMasters());
718 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
719 // always mark as cache fill for now... if we implement
720 // no-write-allocate or bypass accesses this will have to
722 if (pkt->cmd == MemCmd::Writeback) {
723 // We use forward_time here because there is an
724 // uncached memory write, forwarded to WriteBuffer. It
725 // specifies the latency to allocate an internal buffer and to
726 // schedule an event to the queued port and also takes into
727 // account the additional delay of the xbar.
728 allocateWriteBuffer(pkt, forward_time, true);
730 if (blk && blk->isValid()) {
731 // If we have a write miss to a valid block, we
732 // need to mark the block non-readable. Otherwise
733 // if we allow reads while there's an outstanding
734 // write miss, the read could return stale data
735 // out of the cache block... a more aggressive
736 // system could detect the overlap (if any) and
737 // forward data out of the MSHRs, but we don't do
738 // that yet. Note that we do need to leave the
739 // block valid so that it stays in the cache, in
740 // case we get an upgrade response (and hence no
741 // new data) when the write miss completes.
742 // As long as CPUs do proper store/load forwarding
743 // internally, and have a sufficiently weak memory
744 // model, this is probably unnecessary, but at some
745 // point it must have seemed like we needed it...
746 assert(pkt->needsExclusive());
747 assert(!blk->isWritable());
748 blk->status &= ~BlkReadable;
750 // Here we are using forward_time, modelling the latency of
751 // a miss (outbound) just as forwardLatency, neglecting the
752 // lookupLatency component. In this case this latency value
753 // specifies the latency to allocate an internal buffer and to
754 // schedule an event to the queued port, when a cacheable miss
755 // is forwarded to MSHR queue.
756 // We take also into account the additional delay of the xbar.
757 allocateMissBuffer(pkt, forward_time, true);
761 // Don't notify on SWPrefetch
762 if (!pkt->cmd.isSWPrefetch())
763 next_pf_time = prefetcher->notify(pkt);
767 // Here we consider just forward_time.
768 if (next_pf_time != MaxTick)
769 requestMemSideBus(Request_PF, std::max(clockEdge(forwardLatency),
771 // copy writebacks to write buffer
772 while (!writebacks.empty()) {
773 PacketPtr wbPkt = writebacks.front();
774 // We use forwardLatency here because we are copying writebacks
775 // to write buffer. It specifies the latency to allocate an internal
776 // buffer and to schedule an event to the queued port.
777 allocateWriteBuffer(wbPkt, forward_time, true);
778 writebacks.pop_front();
785 // See comment in cache.hh.
// Build the packet to send downstream for a miss on cpu_pkt: upgrades
// when we already hold a shared (read-only) copy and need exclusive,
// SCUpgradeFailReq for already-failed store conditionals, and otherwise
// ReadExReq/ReadReq depending on exclusivity. Uncacheable requests and
// Writebacks/upgrades that missed entirely are forwarded as-is (signalled
// to the caller by the elided early-return paths).
786 template<class TagStore>
788 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
789 bool needsExclusive) const
791 bool blkValid = blk && blk->isValid();
793 if (cpu_pkt->req->isUncacheable()) {
794 //assert(blk == NULL);
799 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
800 // Writebacks that weren't allocated in access() and upgrades
801 // from upper-level caches that missed completely just go
806 assert(cpu_pkt->needsResponse());
809 // @TODO make useUpgrades a parameter.
810 // Note that ownership protocols require upgrade, otherwise a
811 // write miss on a shared owned block will generate a ReadExcl,
812 // which will clobber the owned copy.
813 const bool useUpgrades = true;
814 if (blkValid && useUpgrades) {
815 // only reason to be here is that blk is shared
816 // (read-only) and we need exclusive
817 assert(needsExclusive);
818 assert(!blk->isWritable());
819 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
820 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
821 cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
822 // Even though this SC will fail, we still need to send out the
823 // request and get the data to supply it to other snoopers in the case
824 // where the determination the StoreCond fails is delayed due to
825 // all caches not being on the same local bus.
826 cmd = MemCmd::SCUpgradeFailReq;
827 } else if (cpu_pkt->isWriteInvalidate()) {
831 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
// the new downstream packet shares the original request
833 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
836 DPRINTF(Cache, "%s created %s address %x size %d\n",
837 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
842 template<class TagStore>
// Atomic-mode request entry point: services the request synchronously,
// accumulating latency in lat. Handles bypass mode, requests already
// claimed by an upper cache (invalidate ourselves and forward if not the
// last-level cache), then the normal access path with an immediate
// downstream sendAtomic() on a miss, fill/satisfy on response, and
// synchronous draining of any generated writebacks. Returns the total
// latency in ticks.
844 Cache<TagStore>::recvAtomic(PacketPtr pkt)
846 // We are in atomic mode so we pay just for lookupLatency here.
847 Cycles lat = lookupLatency;
848 // @TODO: make this a parameter
849 bool last_level_cache = false;
851 // Forward the request if the system is in cache bypass mode.
852 if (system->bypassCaches())
853 return ticksToCycles(memSidePort->sendAtomic(pkt));
855 promoteWholeLineWrites(pkt);
857 if (pkt->memInhibitAsserted()) {
858 assert(!pkt->req->isUncacheable());
859 // have to invalidate ourselves and any lower caches even if
860 // upper cache will be responding
861 if (pkt->isInvalidate()) {
862 BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
863 if (blk && blk->isValid()) {
864 tags->invalidate(blk);
866 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x (%s):"
868 pkt->cmdString(), pkt->getAddr(),
869 pkt->isSecure() ? "s" : "ns");
871 if (!last_level_cache) {
872 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x (%s)\n",
873 pkt->cmdString(), pkt->getAddr(),
874 pkt->isSecure() ? "s" : "ns");
875 lat += ticksToCycles(memSidePort->sendAtomic(pkt));
878 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
879 pkt->cmdString(), pkt->getAddr());
882 return lat * clockPeriod();
885 // should assert here that there are no outstanding MSHRs or
886 // writebacks... that would mean that someone used an atomic
887 // access in timing mode
890 PacketList writebacks;
892 if (!access(pkt, blk, lat, writebacks)) {
// MISS: build (or decide to forward) the downstream packet
895 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
897 bool is_forward = (bus_pkt == NULL);
900 // just forwarding the same request to the next level
901 // no local cache operation involved
905 DPRINTF(Cache, "Sending an atomic %s for %x (%s)\n",
906 bus_pkt->cmdString(), bus_pkt->getAddr(),
907 bus_pkt->isSecure() ? "s" : "ns");
910 CacheBlk::State old_state = blk ? blk->status : 0;
913 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));
915 DPRINTF(Cache, "Receive response: %s for addr %x (%s) in state %i\n",
916 bus_pkt->cmdString(), bus_pkt->getAddr(),
917 bus_pkt->isSecure() ? "s" : "ns",
920 // If packet was a forward, the response (if any) is already
921 // in place in the bus_pkt == pkt structure, so we don't need
922 // to do anything. Otherwise, use the separate bus_pkt to
923 // generate response to pkt and then delete it.
925 if (pkt->needsResponse()) {
926 assert(bus_pkt->isResponse());
927 if (bus_pkt->isError()) {
928 pkt->makeAtomicResponse();
929 pkt->copyError(bus_pkt);
930 } else if (pkt->isWriteInvalidate()) {
931 // note the use of pkt, not bus_pkt here.
933 blk = handleFill(pkt, blk, writebacks);
934 satisfyCpuSideRequest(pkt, blk);
936 satisfyCpuSideRequest(pkt, blk);
938 } else if (bus_pkt->isRead() ||
939 bus_pkt->cmd == MemCmd::UpgradeResp) {
940 // we're updating cache state to allow us to
941 // satisfy the upstream request from the cache
942 blk = handleFill(bus_pkt, blk, writebacks);
943 satisfyCpuSideRequest(pkt, blk);
945 // we're satisfying the upstream request without
946 // modifying cache state, e.g., a write-through
947 pkt->makeAtomicResponse();
954 // Note that we don't invoke the prefetcher at all in atomic mode.
955 // It's not clear how to do it properly, particularly for
956 // prefetchers that aggressively generate prefetch candidates and
957 // rely on bandwidth contention to throttle them; these will tend
958 // to pollute the cache in atomic mode since there is no bandwidth
959 // contention. If we ever do want to enable prefetching in atomic
960 // mode, though, this is the place to do it... see timingAccess()
961 // for an example (though we'd want to issue the prefetch(es)
962 // immediately rather than calling requestMemSideBus() as we do
965 // Handle writebacks if needed
966 while (!writebacks.empty()){
967 PacketPtr wbPkt = writebacks.front();
968 memSidePort->sendAtomic(wbPkt);
969 writebacks.pop_front();
973 if (pkt->needsResponse()) {
974 pkt->makeAtomicResponse();
977 return lat * clockPeriod();
981 template<class TagStore>
// Functional (debug, zero-time) access: probe every place the data could
// live — the tag store, the opposite-side port's peers, the MSHR queue,
// and the write buffer — updating the packet as data is found, and only
// declaring it done once a dirty (owned) copy has answered. If not
// satisfied here, forward towards memory (from the CPU side) or towards
// the CPU as a functional snoop (from the memory side).
983 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
985 if (system->bypassCaches()) {
986 // Packets from the memory side are snoop requests and
987 // shouldn't happen in bypass mode.
990 // The cache should be flushed if we are in cache bypass mode,
991 // so we don't need to check if we need to update anything.
992 memSidePort->sendFunctional(pkt);
996 Addr blk_addr = blockAlign(pkt->getAddr());
997 bool is_secure = pkt->isSecure();
998 BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure);
999 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
// label this cache on the packet's trail for checkFunctional bookkeeping
1001 pkt->pushLabel(name());
1003 CacheBlkPrintWrapper cbpw(blk);
1005 // Note that just because an L2/L3 has valid data doesn't mean an
1006 // L1 doesn't have a more up-to-date modified copy that still
1007 // needs to be found. As a result we always update the request if
1008 // we have it, but only declare it satisfied if we are the owner.
1010 // see if we have data at all (owned or otherwise)
1011 bool have_data = blk && blk->isValid()
1012 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
1015 // data we have is dirty if marked as such or if valid & ownership
1016 // pending due to outstanding UpgradeReq
1018 have_data && (blk->isDirty() ||
1019 (mshr && mshr->inService && mshr->isPendingDirty()));
// short-circuit evaluation: stop probing as soon as someone satisfies it
1021 bool done = have_dirty
1022 || cpuSidePort->checkFunctional(pkt)
1023 || mshrQueue.checkFunctional(pkt, blk_addr)
1024 || writeBuffer.checkFunctional(pkt, blk_addr)
1025 || memSidePort->checkFunctional(pkt);
1027 DPRINTF(Cache, "functional %s %x (%s) %s%s%s\n",
1028 pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
1029 (blk && blk->isValid()) ? "valid " : "",
1030 have_data ? "data " : "", done ? "done " : "");
1032 // We're leaving the cache, so pop cache->name() label
1036 pkt->makeResponse();
1038 // if it came as a request from the CPU side then make sure it
1039 // continues towards the memory side
1041 memSidePort->sendFunctional(pkt);
1042 } else if (forwardSnoops && cpuSidePort->isSnooping()) {
1043 // if it came from the memory side, it must be a snoop request
1044 // and we should only forward it if we are forwarding snoops
1045 cpuSidePort->sendFunctionalSnoop(pkt);
1051 /////////////////////////////////////////////////////
1053 // Response handling: responses from the memory side
1055 /////////////////////////////////////////////////////
// Handle a timing-mode response arriving from the memory side. The
// response is matched to its MSHR (carried in senderState), the block
// is filled if appropriate, and then every target queued on the MSHR
// (CPU requests, prefetches, deferred snoops) is serviced in order.
// Finally the MSHR is deallocated or re-armed for deferred targets,
// and any writebacks generated by the fill are scheduled.
1058 template<class TagStore>
1060 Cache<TagStore>::recvTimingResp(PacketPtr pkt)
1062 assert(pkt->isResponse());
// The MSHR this response belongs to rides along in senderState.
1064 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1065 bool is_error = pkt->isError();
1070 DPRINTF(Cache, "Cache received packet with error for address %x (%s), "
1071 "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
1075 DPRINTF(Cache, "Handling response to %s for address %x (%s)\n",
1076 pkt->cmdString(), pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
1078 MSHRQueue *mq = mshr->queue;
// Remember whether the queue was full so we can unblock below once
// this MSHR is deallocated.
1079 bool wasFull = mq->isFull();
1081 if (mshr == noTargetMSHR) {
1082 // we always clear at least one target
1083 clearBlocked(Blocked_NoTargets);
1084 noTargetMSHR = NULL;
1087 // Initial target is used just for stats
1088 MSHR::Target *initial_tgt = mshr->getTarget();
1089 BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1090 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
1091 Tick miss_latency = curTick() - initial_tgt->recvTime;
1092 PacketList writebacks;
1093 // We need forward_time here because we have a call of
1094 // allocateWriteBuffer() that need this parameter to specify the
1095 // time to request the bus. In this case we use forward latency
1096 // because there is a writeback. We pay also here for headerDelay
1097 // that is charged of bus latencies if the packet comes from the
1099 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
// Attribute the measured miss latency to the right per-master stat.
1101 if (pkt->req->isUncacheable()) {
1102 assert(pkt->req->masterId() < system->maxMasters());
1103 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
1106 assert(pkt->req->masterId() < system->maxMasters());
1107 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
// A response fills the cache only if it carries data (read or
// upgrade) and the MSHR is not a simple forward.
1111 bool is_fill = !mshr->isForward &&
1112 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1114 if (is_fill && !is_error) {
1115 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
1118 // give mshr a chance to do some dirty work
1119 mshr->handleFill(pkt, blk);
1121 blk = handleFill(pkt, blk, writebacks);
1122 assert(blk != NULL);
1125 // First offset for critical word first calculations
1126 int initial_offset = 0;
1128 if (mshr->hasTargets()) {
1129 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
// Service every target queued on this MSHR, in arrival order.
1132 while (mshr->hasTargets()) {
1133 MSHR::Target *target = mshr->getTarget();
1135 switch (target->source) {
1136 case MSHR::Target::FromCPU:
1137 Tick completion_time;
1138 // Here we charge on completion_time the delay of the xbar if the
1139 // packet comes from it, charged on headerDelay.
1140 completion_time = pkt->headerDelay;
1142 // Software prefetch handling for cache closest to core
1143 if (target->pkt->cmd.isSWPrefetch() && isTopLevel) {
1144 // a software prefetch would have already been ack'd immediately
1145 // with dummy data so the core would be able to retire it.
1146 // this request completes right here, so we deallocate it.
1147 delete target->pkt->req;
1149 break; // skip response
1152 // unlike the other packet flows, where data is found in other
1153 // caches or memory and brought back, write invalidates always
1154 // have the data right away, so the above check for "is fill?"
1155 // cannot actually be determined until examining the stored MSHR
1156 // state. We "catch up" with that logic here, which is duplicated
1158 if (target->pkt->isWriteInvalidate() && isTopLevel) {
1161 // NB: we use the original packet here and not the response!
1162 mshr->handleFill(target->pkt, blk);
1163 blk = handleFill(target->pkt, blk, writebacks);
1164 assert(blk != NULL);
1170 satisfyCpuSideRequest(target->pkt, blk,
1171 true, mshr->hasPostDowngrade());
1173 // How many bytes past the first request is this one
1174 int transfer_offset =
1175 target->pkt->getOffset(blkSize) - initial_offset;
1176 if (transfer_offset < 0) {
1177 transfer_offset += blkSize;
1180 // If not critical word (offset) return payloadDelay.
1181 // responseLatency is the latency of the return path
1182 // from lower level caches/memory to an upper level cache or
1184 completion_time += clockEdge(responseLatency) +
1185 (transfer_offset ? pkt->payloadDelay : 0);
1187 assert(!target->pkt->req->isUncacheable());
1189 assert(target->pkt->req->masterId() < system->maxMasters());
1190 missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] +=
1191 completion_time - target->recvTime;
1192 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
1193 // failed StoreCond upgrade
1194 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
1195 target->pkt->cmd == MemCmd::StoreCondFailReq ||
1196 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
1197 // responseLatency is the latency of the return path
1198 // from lower level caches/memory to an upper level cache or
1200 completion_time += clockEdge(responseLatency) +
// Signal store-conditional failure to the requester.
1202 target->pkt->req->setExtraData(0);
1204 // not a cache fill, just forwarding response
1205 // responseLatency is the latency of the return path
1206 // from lower level caches/memory to the core.
1207 completion_time += clockEdge(responseLatency) +
1209 if (pkt->isRead() && !is_error) {
1210 target->pkt->setData(pkt->getConstPtr<uint8_t>());
1213 target->pkt->makeTimingResponse();
1214 // if this packet is an error copy that to the new packet
1216 target->pkt->copyError(pkt);
1217 if (target->pkt->cmd == MemCmd::ReadResp &&
1218 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
1219 // If intermediate cache got ReadRespWithInvalidate,
1220 // propagate that. Response should not have
1221 // isInvalidate() set otherwise.
1222 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
1223 DPRINTF(Cache, "%s updated cmd to %s for address %x\n",
1224 __func__, target->pkt->cmdString(),
1225 target->pkt->getAddr());
1227 // Reset the bus additional time as it is now accounted for
1228 target->pkt->headerDelay = target->pkt->payloadDelay = 0;
1229 cpuSidePort->schedTimingResp(target->pkt, completion_time);
1232 case MSHR::Target::FromPrefetcher:
1233 assert(target->pkt->cmd == MemCmd::HardPFReq);
1235 blk->status |= BlkHWPrefetched;
// Prefetch needs no upstream response; free its request here.
1236 delete target->pkt->req;
1240 case MSHR::Target::FromSnoop:
1241 // I don't believe that a snoop can be in an error state
1243 // response to snoop request
1244 DPRINTF(Cache, "processing deferred snoop...\n");
1245 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
1246 handleSnoop(target->pkt, blk, true, true,
1247 mshr->hasPostInvalidate());
1251 panic("Illegal target->source enum %d\n", target->source);
// All targets serviced: update the block's coherence state as
// dictated by the response / deferred snoop bookkeeping.
1257 if (blk && blk->isValid()) {
1258 if ((pkt->isInvalidate() || mshr->hasPostInvalidate()) &&
1259 (!pkt->isWriteInvalidate() || !isTopLevel)) {
1260 assert(blk != tempBlock);
1261 tags->invalidate(blk);
1263 } else if (mshr->hasPostDowngrade()) {
1264 blk->status &= ~BlkWritable;
1268 if (mshr->promoteDeferredTargets()) {
1269 // avoid later read getting stale data while write miss is
1270 // outstanding.. see comment in timingAccess()
1272 blk->status &= ~BlkReadable;
1275 mq->markPending(mshr);
1276 requestMemSideBus((RequestCause)mq->index, clockEdge() +
1279 mq->deallocate(mshr);
1280 if (wasFull && !mq->isFull()) {
1281 clearBlocked((BlockedCause)mq->index);
1284 // Request the bus for a prefetch if this deallocation freed enough
1285 // MSHRs for a prefetch to take place
1286 if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
1287 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
1289 if (next_pf_time != MaxTick)
1290 requestMemSideBus(Request_PF, next_pf_time);
1293 // reset the xbar additional timing as it is now accounted for
1294 pkt->headerDelay = pkt->payloadDelay = 0;
1296 // copy writebacks to write buffer
1297 while (!writebacks.empty()) {
1298 PacketPtr wbPkt = writebacks.front();
1299 allocateWriteBuffer(wbPkt, clockEdge(forwardLatency), true);
1300 writebacks.pop_front();
1302 // if we used temp block, clear it out
1303 if (blk == tempBlock) {
1304 if (blk->isDirty()) {
1305 // We use forwardLatency here because we are copying
1306 // writebacks to write buffer. It specifies the latency to
1307 // allocate an internal buffer and to schedule an event to the
1309 allocateWriteBuffer(writebackBlk(blk), forward_time, true);
1314 DPRINTF(Cache, "Leaving %s with %s for address %x\n", __func__,
1315 pkt->cmdString(), pkt->getAddr());
// Create a Writeback packet for a valid, dirty block. The caller owns
// the returned packet (and its heap-allocated Request) and is
// responsible for sending it; the block's dirty bit is cleared here.
1322 template<class TagStore>
1324 Cache<TagStore>::writebackBlk(BlkType *blk)
1326 assert(blk && blk->isValid() && blk->isDirty());
// Per-master writeback statistic.
1328 writebacks[Request::wbMasterId]++;
1330 Request *writebackReq =
1331 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1332 Request::wbMasterId);
1333 if (blk->isSecure())
1334 writebackReq->setFlags(Request::SECURE);
1336 writebackReq->taskId(blk->task_id);
// The departing data no longer belongs to the task that wrote it.
1337 blk->task_id= ContextSwitchTaskId::Unknown;
1338 blk->tickInserted = curTick();
1340 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback);
1341 if (blk->isWritable()) {
// We held the line exclusively, so the writeback carries the
// only up-to-date copy in the system.
1342 writeback->setSupplyExclusive();
1344 writeback->allocate();
1345 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
// Data now travels with the packet; the cached copy is clean.
1347 blk->status &= ~BlkDirty;
// Functionally write back every dirty block by visiting all blocks
// with writebackVisitor (see its definition below).
1351 template<class TagStore>
1353 Cache<TagStore>::memWriteback()
1355 WrappedBlkVisitor visitor(*this, &Cache<TagStore>::writebackVisitor);
1356 tags->forEachBlk(visitor);
// Invalidate every block in the cache by visiting all blocks with
// invalidateVisitor (see its definition below).
1359 template<class TagStore>
1361 Cache<TagStore>::memInvalidate()
1363 WrappedBlkVisitor visitor(*this, &Cache<TagStore>::invalidateVisitor);
1364 tags->forEachBlk(visitor);
// Report whether any block in the cache is dirty, determined by
// walking all blocks with CacheBlkIsDirtyVisitor.
1367 template<class TagStore>
1369 Cache<TagStore>::isDirty() const
1371 CacheBlkIsDirtyVisitor<BlkType> visitor;
1372 tags->forEachBlk(visitor);
1374 return visitor.isDirty();
// Per-block visitor: if the block is dirty, functionally write its
// data to memory via the memory-side port and mark it clean. Used by
// memWriteback() through tags->forEachBlk().
1377 template<class TagStore>
1379 Cache<TagStore>::writebackVisitor(BlkType &blk)
1381 if (blk.isDirty()) {
1382 assert(blk.isValid());
// Stack-allocated request/packet; the packet references the
// block's data directly (dataStatic), so no copy is made.
1384 Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
1385 blkSize, 0, Request::funcMasterId);
1386 request.taskId(blk.task_id);
1388 Packet packet(&request, MemCmd::WriteReq);
1389 packet.dataStatic(blk.data);
1391 memSidePort->sendFunctional(&packet);
// Memory now holds the same data; clear the dirty bit.
1393 blk.status &= ~BlkDirty;
// Per-block visitor: invalidate a valid block. Used by memInvalidate()
// through tags->forEachBlk(). Dirty blocks are expected to have been
// written back first (hence the warning and the !isDirty assert).
1399 template<class TagStore>
1401 Cache<TagStore>::invalidateVisitor(BlkType &blk)
1405 warn_once("Invalidating dirty cache lines. Expect things to break.\n");
1407 if (blk.isValid()) {
1408 assert(!blk.isDirty());
1409 tags->invalidate(dynamic_cast< BlkType *>(&blk));
// Flush the line targeted by an uncacheable access: if we hold the
// block, write it back (if dirty) and invalidate it so the
// uncacheable request sees a coherent view of memory.
1416 template<class TagStore>
1418 Cache<TagStore>::uncacheableFlush(PacketPtr pkt)
1420 DPRINTF(Cache, "%s%s %x uncacheable\n", pkt->cmdString(),
1421 pkt->req->isInstFetch() ? " (ifetch)" : "",
1424 if (pkt->req->isClearLL())
1427 BlkType *blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
// Reuse the forEachBlk visitors for a single block: push dirty
// data out, then drop the block.
1429 writebackVisitor(*blk);
1430 invalidateVisitor(*blk);
// Choose a victim frame for 'addr'. If the victim is valid and has an
// outstanding upgrade MSHR the allocation fails (replacing a block in
// transient coherence state is too hard); otherwise, a dirty victim's
// writeback packet is appended to 'writebacks' for the caller to issue.
1435 template<class TagStore>
1436 typename Cache<TagStore>::BlkType*
1437 Cache<TagStore>::allocateBlock(Addr addr, bool is_secure,
1438 PacketList &writebacks)
1440 BlkType *blk = tags->findVictim(addr);
1442 if (blk->isValid()) {
1443 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1444 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1446 // must be an outstanding upgrade request
1447 // on a block we're about to replace...
1448 assert(!blk->isWritable() || blk->isDirty());
1449 assert(repl_mshr->needsExclusive());
1450 // too hard to replace block with transient state
1451 // allocation failed, block not inserted
1454 DPRINTF(Cache, "replacement: replacing %x (%s) with %x (%s): %s\n",
1455 repl_addr, blk->isSecure() ? "s" : "ns",
1456 addr, is_secure ? "s" : "ns",
1457 blk->isDirty() ? "writeback" : "clean");
1459 if (blk->isDirty()) {
1460 // Save writeback packet for handling by caller
1461 writebacks.push_back(writebackBlk(blk));
1470 // Note that the reason we return a list of writebacks rather than
1471 // inserting them directly in the write buffer is that this function
1472 // is called by both atomic and timing-mode accesses, and in atomic
1473 // mode we don't mess with the write buffer (we just perform the
1474 // writebacks atomically once the original request is complete).
//
// Populate a cache block with the contents of the given packet. If
// 'blk' is NULL a victim is allocated (possibly falling back to
// tempBlock when no frame is replaceable); any evictions are returned
// through 'writebacks'. Returns the block that was filled/updated.
1475 template<class TagStore>
1476 typename Cache<TagStore>::BlkType*
1477 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1478 PacketList &writebacks)
1480 assert(pkt->isResponse() || pkt->isWriteInvalidate());
1481 Addr addr = pkt->getAddr();
1482 bool is_secure = pkt->isSecure();
// Remember the incoming state purely for the debug trace below.
1484 CacheBlk::State old_state = blk ? blk->status : 0;
1488 // better have read new data...
1489 assert(pkt->hasData());
1491 // only read responses and (original) write invalidate req's have data;
1492 // note that we don't write the data here for write invalidate - that
1493 // happens in the subsequent satisfyCpuSideRequest.
1494 assert(pkt->isRead() || pkt->isWriteInvalidate());
1496 // need to do a replacement
1497 blk = allocateBlock(addr, is_secure, writebacks);
1499 // No replaceable block... just use temporary storage to
1500 // complete the current request and then get rid of it
1501 assert(!tempBlock->isValid());
1503 tempBlock->set = tags->extractSet(addr);
1504 tempBlock->tag = tags->extractTag(addr);
1505 // @todo: set security state as well...
1506 DPRINTF(Cache, "using temp block for %x (%s)\n", addr,
1507 is_secure ? "s" : "ns");
1509 tags->insertBlock(pkt, blk);
1512 // we should never be overwriting a valid block
1513 assert(!blk->isValid());
1515 // existing block... probably an upgrade
1516 assert(blk->tag == tags->extractTag(addr));
1517 // either we're getting new data or the block should already be valid
1518 assert(pkt->hasData() || blk->isValid());
1519 // don't clear block status... if block is already dirty we
1520 // don't want to lose that
1524 blk->status |= BlkSecure;
1525 blk->status |= BlkValid | BlkReadable;
// No sharers asserted on the response, so we may take the block
// in a writable (exclusive) state.
1527 if (!pkt->sharedAsserted()) {
1528 blk->status |= BlkWritable;
1529 // If we got this via cache-to-cache transfer (i.e., from a
1530 // cache that was an owner) and took away that owner's copy,
1531 // then we need to write it back. Normally this happens
1532 // anyway as a side effect of getting a copy to write it, but
1533 // there are cases (such as failed store conditionals or
1534 // compare-and-swaps) where we'll demand an exclusive copy but
1535 // end up not writing it.
1536 if (pkt->memInhibitAsserted())
1537 blk->status |= BlkDirty;
1540 DPRINTF(Cache, "Block addr %x (%s) moving from state %x to %s\n",
1541 addr, is_secure ? "s" : "ns", old_state, blk->print());
1543 // if we got new data, copy it in (checking for a read response
1544 // and a response that has data is the same in the end)
1545 if (pkt->isRead()) {
1546 assert(pkt->hasData());
1547 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
1549 // We pay for fillLatency here.
1550 blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1557 /////////////////////////////////////////////////////
1559 // Snoop path: requests coming in from the memory side
1561 /////////////////////////////////////////////////////
// Build and schedule a timing-mode snoop response supplying 'blk_data'
// for the snoop request 'req_pkt'. A fresh response packet is created
// unless the caller already made a copy ('already_copied'); with
// 'pending_inval' set, a ReadResp is converted to
// ReadRespWithInvalidate (see comment below).
1563 template<class TagStore>
1566 doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1567 bool already_copied, bool pending_inval)
1570 assert(req_pkt->isRequest());
1571 assert(req_pkt->needsResponse());
1573 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
1574 req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
1575 // timing-mode snoop responses require a new packet, unless we
1576 // already made a copy...
1577 PacketPtr pkt = req_pkt;
1578 if (!already_copied)
1579 // do not clear flags, and allocate space for data if the
1580 // packet needs it (the only packets that carry data are read
1582 pkt = new Packet(req_pkt, false, req_pkt->isRead());
1584 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1585 pkt->makeTimingResponse();
1586 if (pkt->isRead()) {
1587 pkt->setDataFromBlock(blk_data, blkSize);
1589 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1590 // Assume we defer a response to a read from a far-away cache
1591 // A, then later defer a ReadExcl from a cache B on the same
1592 // bus as us. We'll assert MemInhibit in both cases, but in
1593 // the latter case MemInhibit will keep the invalidation from
1594 // reaching cache A. This special response tells cache A that
1595 // it gets the block to satisfy its read, but must immediately
1597 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1599 DPRINTF(Cache, "%s created response: %s address %x size %d\n",
1600 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1601 // Here we consider forward_time, paying for just forward latency and
1602 // also charging the delay provided by the xbar.
1603 // forward_time is used as send_time in next allocateWriteBuffer().
1604 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1605 // Here we reset the timing of the packet.
1606 pkt->headerDelay = pkt->payloadDelay = 0;
1607 memSidePort->schedTimingSnoopResp(pkt, forward_time);
// Core snoop-handling logic, shared by timing and atomic modes and by
// deferred snoops replayed from an MSHR. Forwards the snoop upward if
// required, then updates this cache's block state and (when we own
// dirty data) supplies a response.
1610 template<class TagStore>
1612 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1613 bool is_timing, bool is_deferred,
1616 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
1617 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1618 // deferred snoops can only happen in timing mode
1619 assert(!(is_deferred && !is_timing));
1620 // pending_inval only makes sense on deferred snoops
1621 assert(!(pending_inval && !is_deferred));
1622 assert(pkt->isRequest());
1624 // the packet may get modified if we or a forwarded snooper
1625 // responds in atomic mode, so remember a few things about the
1626 // original packet up front
1627 bool invalidate = pkt->isInvalidate();
1628 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1630 if (forwardSnoops) {
1631 // first propagate snoop upward to see if anyone above us wants to
1632 // handle it. save & restore packet src since it will get
1633 // rewritten to be relative to cpu-side bus (if any)
1634 bool alreadyResponded = pkt->memInhibitAsserted();
1636 Packet snoopPkt(pkt, true, false); // clear flags, no allocation
1637 snoopPkt.setExpressSnoop();
1638 snoopPkt.pushSenderState(new ForwardResponseRecord());
1639 // the snoop packet does not need to wait any additional
1641 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1642 cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1643 if (snoopPkt.memInhibitAsserted()) {
1644 // cache-to-cache response from some upper cache
1645 assert(!alreadyResponded);
1646 pkt->assertMemInhibit();
1648 delete snoopPkt.popSenderState();
1650 if (snoopPkt.sharedAsserted()) {
1651 pkt->assertShared();
1653 // If this request is a prefetch and an
1654 // upper level squashes the prefetch request,
1655 // make sure to propagate the squash to the requester.
1656 if (snoopPkt.prefetchSquashed()) {
1657 pkt->setPrefetchSquashed();
1660 cpuSidePort->sendAtomicSnoop(pkt);
1661 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1662 // cache-to-cache response from some upper cache:
1663 // forward response to original requester
1664 assert(pkt->isResponse());
// Snoop miss: nothing to do in this cache.
1669 if (!blk || !blk->isValid()) {
1670 DPRINTF(Cache, "%s snoop miss for %s address %x size %d\n",
1671 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1674 DPRINTF(Cache, "%s snoop hit for %s for address %x size %d, "
1675 "old state is %s\n", __func__, pkt->cmdString(),
1676 pkt->getAddr(), pkt->getSize(), blk->print());
1679 // we may end up modifying both the block state and the packet (if
1680 // we respond in atomic mode), so just figure out what to do now
1681 // and then do it later. If we find dirty data while snooping for a
1682 // WriteInvalidate, we don't care, since no merging needs to take place.
1683 // We need the eviction to happen as normal, but the data needn't be
1684 // sent anywhere. nor should the writeback be inhibited at the memory
1685 // controller for any reason.
1686 bool respond = blk->isDirty() && pkt->needsResponse()
1687 && !pkt->isWriteInvalidate();
1688 bool have_exclusive = blk->isWritable();
1690 // Invalidate any prefetch's from below that would strip write permissions
1691 // MemCmd::HardPFReq is only observed by upstream caches. After missing
1692 // above and in it's own cache, a new MemCmd::ReadReq is created that
1693 // downstream caches observe.
1694 if (pkt->cmd == MemCmd::HardPFReq) {
1695 DPRINTF(Cache, "Squashing prefetch from lower cache %#x\n",
1697 pkt->setPrefetchSquashed();
// A non-invalidating read snoop leaves us with a shared copy:
// assert the shared line and give up write permission.
1701 if (pkt->isRead() && !invalidate) {
1702 assert(!needs_exclusive);
1703 pkt->assertShared();
1704 int bits_to_clear = BlkWritable;
1705 const bool haveOwnershipState = true; // for now
1706 if (!haveOwnershipState) {
1707 // if we don't support pure ownership (dirty && !writable),
1708 // have to clear dirty bit here, assume memory snarfs data
1709 // on cache-to-cache xfer
1710 bits_to_clear |= BlkDirty;
1712 blk->status &= ~bits_to_clear;
1716 // prevent anyone else from responding, cache as well as
1717 // memory, and also prevent any memory from even seeing the
1718 // request (with current inhibited semantics), note that this
1719 // applies both to reads and writes and that for writes it
1720 // works thanks to the fact that we still have dirty data and
1721 // will write it back at a later point
1722 pkt->assertMemInhibit();
1723 if (have_exclusive) {
1724 pkt->setSupplyExclusive();
1727 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1729 pkt->makeAtomicResponse();
1730 pkt->setDataFromBlock(blk->data, blkSize);
1732 } else if (is_timing && is_deferred) {
1733 // if it's a deferred timing snoop then we've made a copy of
1734 // the packet, and so if we're not using that copy to respond
1735 // then we need to delete it here.
1739 // Do this last in case it deallocates block data or something
// tempBlock is not managed by the tag store, so only invalidate
// through tags for regular blocks.
1742 if (blk != tempBlock)
1743 tags->invalidate(blk);
1747 DPRINTF(Cache, "new state is %s\n", blk->print());
// Entry point for timing-mode snoop requests from the memory side.
// Filters out snoops we cannot hit (uncacheable, writeback commands,
// out-of-range addresses), lets a matching MSHR defer the snoop, checks
// the write buffer for in-flight writebacks that can supply data, and
// finally falls through to the generic handleSnoop().
1751 template<class TagStore>
1753 Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
1755 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
1756 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1758 // Snoops shouldn't happen when bypassing caches
1759 assert(!system->bypassCaches());
1761 // check if the packet is for an address range covered by this
1762 // cache, partly to not waste time looking for it, but also to
1763 // ensure that we only forward the snoop upwards if it is within
1764 // our address ranges
1765 bool in_range = false;
1766 for (AddrRangeList::const_iterator r = addrRanges.begin();
1767 r != addrRanges.end(); ++r) {
1768 if (r->contains(pkt->getAddr())) {
1774 // Note that some deferred snoops don't have requests, since the
1775 // original access may have already completed
1776 if ((pkt->req && pkt->req->isUncacheable()) ||
1777 pkt->cmd == MemCmd::Writeback || !in_range) {
1778 //Can't get a hit on an uncacheable address
1779 //Revisit this for multi level coherence
1783 bool is_secure = pkt->isSecure();
1784 BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure);
1786 Addr blk_addr = blockAlign(pkt->getAddr());
1787 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
1789 // Squash any prefetch requests from below on MSHR hits
1790 if (mshr && pkt->cmd == MemCmd::HardPFReq) {
1791 DPRINTF(Cache, "Squashing prefetch from lower cache on mshr hit %#x\n",
1793 pkt->setPrefetchSquashed();
1797 // Let the MSHR itself track the snoop and decide whether we want
1798 // to go ahead and do the regular cache snoop
1799 if (mshr && mshr->handleSnoop(pkt, order++)) {
1800 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x (%s)."
1801 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
1804 if (mshr->getNumTargets() > numTarget)
1805 warn("allocating bonus target for snoop"); //handle later
1809 //We also need to check the writeback buffers and handle those
1810 std::vector<MSHR *> writebacks;
1811 if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
1812 DPRINTF(Cache, "Snoop hit in writeback to addr: %x (%s)\n",
1813 pkt->getAddr(), is_secure ? "s" : "ns");
1815 //Look through writebacks for any non-uncachable writes, use that
1816 if (writebacks.size()) {
1817 // We should only ever find a single match
1818 assert(writebacks.size() == 1);
1819 mshr = writebacks[0];
1820 assert(!mshr->isUncacheable());
1821 assert(mshr->getNumTargets() == 1);
1822 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1823 assert(wb_pkt->cmd == MemCmd::Writeback);
// The pending writeback holds the data: respond from it.
1825 assert(!pkt->memInhibitAsserted());
1826 pkt->assertMemInhibit();
1827 if (!pkt->needsExclusive()) {
1828 pkt->assertShared();
1829 // the writeback is no longer the exclusive copy in the system
1830 wb_pkt->clearSupplyExclusive();
1832 // if we're not asserting the shared line, we need to
1833 // invalidate our copy. we'll do that below as long as
1834 // the packet's invalidate flag is set...
1835 assert(pkt->isInvalidate());
1837 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
1840 if (pkt->isInvalidate()) {
1841 // Invalidation trumps our writeback... discard here
1842 markInService(mshr, false);
1845 } // writebacks.size()
1848 // If this was a shared writeback, there may still be
1849 // other shared copies above that require invalidation.
1850 // We could be more selective and return here if the
1851 // request is non-exclusive or if the writeback is
1853 handleSnoop(pkt, blk, true, false, false);
// CPU-side port hook: hand express snoop responses straight to the
// owning cache.
1856 template<class TagStore>
1858 Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
1860 // Express snoop responses from master to slave, e.g., from L1 to L2
1861 cache->recvTimingSnoopResp(pkt);
// Atomic-mode snoop entry point: filter uncacheable/writeback snoops,
// run the generic handleSnoop() in atomic mode, and return the latency
// charged for the snoop.
1865 template<class TagStore>
1867 Cache<TagStore>::recvAtomicSnoop(PacketPtr pkt)
1869 // Snoops shouldn't happen when bypassing caches
1870 assert(!system->bypassCaches());
1872 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1873 // Can't get a hit on an uncacheable address
1874 // Revisit this for multi level coherence
1878 BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1879 handleSnoop(pkt, blk, false, false, false);
1880 // We consider forwardLatency here because a snoop occurs in atomic mode
1881 return forwardLatency * clockPeriod();
// Select the next MSHR to service, arbitrating between the miss queue
// and the write buffer: misses are preferred unless the write buffer
// is full, and conflicting entries in the other queue are serviced
// first to preserve ordering. With no pending entries, a prefetch may
// be started if the prefetcher has one ready and it doesn't collide
// with existing cache/MSHR/write-buffer state.
1885 template<class TagStore>
1887 Cache<TagStore>::getNextMSHR()
1889 // Check both MSHR queue and write buffer for potential requests
1890 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1891 MSHR *write_mshr = writeBuffer.getNextMSHR();
1893 // Now figure out which one to send... some cases are easy
1894 if (miss_mshr && !write_mshr) {
1897 if (write_mshr && !miss_mshr) {
1901 if (miss_mshr && write_mshr) {
1902 // We have one of each... normally we favor the miss request
1903 // unless the write buffer is full
1904 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1905 // Write buffer is full, so we'd like to issue a write;
1906 // need to search MSHR queue for conflicting earlier miss.
1907 MSHR *conflict_mshr =
1908 mshrQueue.findPending(write_mshr->addr, write_mshr->size,
1909 write_mshr->isSecure);
1911 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1912 // Service misses in order until conflict is cleared.
1913 return conflict_mshr;
1916 // No conflicts; issue write
1920 // Write buffer isn't full, but need to check it for
1921 // conflicting earlier writeback
1922 MSHR *conflict_mshr =
1923 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size,
1924 miss_mshr->isSecure);
1925 if (conflict_mshr) {
1926 // not sure why we don't check order here... it was in the
1927 // original code but commented out.
1929 // The only way this happens is if we are
1930 // doing a write and we didn't have permissions
1931 // then subsequently saw a writeback (owned got evicted)
1932 // We need to make sure to perform the writeback first
1933 // To preserve the dirty data, then we can issue the write
1935 // should we return write_mshr here instead? I.e. do we
1936 // have to flush writes in order? I don't think so... not
1937 // for Alpha anyway. Maybe for x86?
1938 return conflict_mshr;
1941 // No conflicts; issue read
1945 // fall through... no pending requests. Try a prefetch.
1946 assert(!miss_mshr && !write_mshr);
1947 if (prefetcher && mshrQueue.canPrefetch()) {
1948 // If we have a miss queue slot, we can try a prefetch
1949 PacketPtr pkt = prefetcher->getPacket();
1951 Addr pf_addr = blockAlign(pkt->getAddr());
// Only issue the prefetch if the line is neither cached nor
// already tracked by an MSHR or pending writeback.
1952 if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
1953 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
1954 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
1955 // Update statistic on number of prefetches issued
1956 // (hwpf_mshr_misses)
1957 assert(pkt->req->masterId() < system->maxMasters());
1958 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1959 // Don't request bus, since we already have it
1960 return allocateMissBuffer(pkt, curTick(), false);
1962 // free the request and packet
// Build the packet to send on the memory-side bus for the next ready
// MSHR: either forward the target packet as-is, construct a cache-line
// request via getBusPacket(), or copy the packet for forwarding when a
// response is still expected. Hardware prefetches are first snooped
// upward and may be squashed or satisfied by an upper-level cache.
1973 template<class TagStore>
1975 Cache<TagStore>::getTimingPacket()
1977 MSHR *mshr = getNextMSHR();
1983 // use request from 1st target
1984 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1985 PacketPtr pkt = NULL;
1987 DPRINTF(CachePort, "%s %s for address %x size %d\n", __func__,
1988 tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());
1990 if (mshr->isForwardNoResponse()) {
1991 // no response expected, just forward packet as it is
1992 assert(tags->findBlock(mshr->addr, mshr->isSecure) == NULL);
1995 BlkType *blk = tags->findBlock(mshr->addr, mshr->isSecure);
1997 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1998 // We need to check the caches above us to verify that
1999 // they don't have a copy of this block in the dirty state
2000 // at the moment. Without this check we could get a stale
2001 // copy from memory that might get used in place of the
2003 Packet snoop_pkt(tgt_pkt, true, false);
2004 snoop_pkt.setExpressSnoop();
2005 snoop_pkt.senderState = mshr;
2006 cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2008 // Check to see if the prefetch was squashed by an upper cache (to
2009 // prevent us from grabbing the line), or if a writeback arrived
2010 // between the time the prefetch was placed in the MSHRs and when
2011 // it was selected to be sent.
2012 //
2014 // It is important to check memInhibitAsserted before
2015 // prefetchSquashed. If another cache has asserted MEM_INHIBIT, it
2016 // will be sending a response which will arrive at the MSHR
2017 // allocated for this request. Checking the prefetchSquash first
2018 // may result in the MSHR being prematurely deallocated.
2020 if (snoop_pkt.memInhibitAsserted()) {
2021 // If we are getting a non-shared response it is dirty
2022 bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
2023 markInService(mshr, pending_dirty_resp);
2024 DPRINTF(Cache, "Upward snoop of prefetch for addr"
2026 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2030 if (snoop_pkt.prefetchSquashed() || blk != NULL) {
2031 DPRINTF(Cache, "Prefetch squashed by cache. "
2032 "Deallocating mshr target %#x.\n", mshr->addr);
2034 // Deallocate the mshr target
2035 if (mshr->queue->forceDeallocateTarget(mshr)) {
2036 // Clear block if this deallocation resulted freed an
2037 // mshr when all had previously been utilized
2038 clearBlocked((BlockedCause)(mshr->queue->index));
2045 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
// getBusPacket() returned NULL: this is not a cache block
// request, so mark the MSHR as a forward.
2047 mshr->isForward = (pkt == NULL);
2049 if (mshr->isForward) {
2050 // not a cache block request, but a response is expected
2051 // make copy of current packet to forward, keep current
2052 // copy for response handling
2053 pkt = new Packet(tgt_pkt, false, true);
2054 if (pkt->isWrite()) {
2055 pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
2060 assert(pkt != NULL);
// Tag the outgoing packet with its MSHR so the response can be
// matched up in recvTimingResp().
2061 pkt->senderState = mshr;
2066 template<class TagStore>
2068 Cache<TagStore>::nextMSHRReadyTime() const
2070 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
2071 writeBuffer.nextMSHRReadyTime());
2073 // Don't signal prefetch ready time if no MSHRs available
2074 // Will signal once enoguh MSHRs are deallocated
2075 if (prefetcher && mshrQueue.canPrefetch()) {
2076 nextReady = std::min(nextReady,
2077 prefetcher->nextPrefetchReadyTime());
2083 template<class TagStore>
2085 Cache<TagStore>::serialize(std::ostream &os)
2087 bool dirty(isDirty());
2090 warn("*** The cache still contains dirty data. ***\n");
2091 warn(" Make sure to drain the system using the correct flags.\n");
2092 warn(" This checkpoint will not restore correctly and dirty data in "
2093 "the cache will be lost!\n");
2096 // Since we don't checkpoint the data in the cache, any dirty data
2097 // will be lost when restoring from a checkpoint of a system that
2098 // wasn't drained properly. Flag the checkpoint as invalid if the
2099 // cache contains dirty data.
2100 bool bad_checkpoint(dirty);
2101 SERIALIZE_SCALAR(bad_checkpoint);
2104 template<class TagStore>
2106 Cache<TagStore>::unserialize(Checkpoint *cp, const std::string §ion)
2108 bool bad_checkpoint;
2109 UNSERIALIZE_SCALAR(bad_checkpoint);
2110 if (bad_checkpoint) {
2111 fatal("Restoring from checkpoints with dirty caches is not supported "
2112 "in the classic memory system. Please remove any caches or "
2113 " drain them properly before taking checkpoints.\n");
2123 template<class TagStore>
2125 Cache<TagStore>::CpuSidePort::getAddrRanges() const
2127 return cache->getAddrRanges();
2130 template<class TagStore>
2132 Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
2134 assert(!cache->system->bypassCaches());
2136 bool success = false;
2138 // always let inhibited requests through, even if blocked,
2139 // ultimately we should check if this is an express snoop, but at
2140 // the moment that flag is only set in the cache itself
2141 if (pkt->memInhibitAsserted()) {
2142 // do not change the current retry state
2143 bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2144 assert(bypass_success);
2146 } else if (blocked || mustSendRetry) {
2147 // either already committed to send a retry, or blocked
2150 // pass it on to the cache, and let the cache decide if we
2151 // have to retry or not
2152 success = cache->recvTimingReq(pkt);
2155 // remember if we have to retry
2156 mustSendRetry = !success;
2160 template<class TagStore>
2162 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
2164 return cache->recvAtomic(pkt);
2167 template<class TagStore>
2169 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
2171 // functional request
2172 cache->functionalAccess(pkt, true);
2175 template<class TagStore>
2177 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
2178 const std::string &_label)
2179 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
2189 template<class TagStore>
2191 Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
2193 cache->recvTimingResp(pkt);
2197 // Express snooping requests to memside port
2198 template<class TagStore>
2200 Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2202 // handle snooping requests
2203 cache->recvTimingSnoopReq(pkt);
2206 template<class TagStore>
2208 Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2210 return cache->recvAtomicSnoop(pkt);
2213 template<class TagStore>
2215 Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2217 // functional snoop (note that in contrast to atomic we don't have
2218 // a specific functionalSnoop method, as they have the same
2219 // behaviour regardless)
2220 cache->functionalAccess(pkt, false);
2223 template<class TagStore>
2225 Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket()
2228 assert(!waitingOnRetry);
2230 // there should never be any deferred request packets in the
2231 // queue, instead we resly on the cache to provide the packets
2232 // from the MSHR queue or write queue
2233 assert(deferredPacketReadyTime() == MaxTick);
2235 // check for request packets (requests & writebacks)
2236 PacketPtr pkt = cache.getTimingPacket();
2238 // can happen if e.g. we attempt a writeback and fail, but
2239 // before the retry, the writeback is eliminated because
2240 // we snoop another cache's ReadEx.
2242 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
2243 // in most cases getTimingPacket allocates a new packet, and
2244 // we must delete it unless it is successfully sent
2245 bool delete_pkt = !mshr->isForwardNoResponse();
2247 // let our snoop responses go first if there are responses to
2248 // the same addresses we are about to writeback, note that
2249 // this creates a dependency between requests and snoop
2250 // responses, but that should not be a problem since there is
2251 // a chain already and the key is that the snoop responses can
2252 // sink unconditionally
2253 if (snoopRespQueue.hasAddr(pkt->getAddr())) {
2254 DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
2255 Tick when = snoopRespQueue.deferredPacketReadyTime();
2256 schedSendEvent(when);
2265 waitingOnRetry = !masterPort.sendTimingReq(pkt);
2267 if (waitingOnRetry) {
2268 DPRINTF(CachePort, "now waiting on a retry\n");
2270 // we are awaiting a retry, but we
2271 // delete the packet and will be creating a new packet
2272 // when we get the opportunity
2275 // note that we have now masked any requestBus and
2276 // schedSendEvent (we will wait for a retry before
2277 // doing anything), and this is so even if we do not
2278 // care about this packet and might override it before
2281 // As part of the call to sendTimingReq the packet is
2282 // forwarded to all neighbouring caches (and any
2283 // caches above them) as a snoop. The packet is also
2284 // sent to any potential cache below as the
2285 // interconnect is not allowed to buffer the
2286 // packet. Thus at this point we know if any of the
2287 // neighbouring, or the downstream cache is
2288 // responding, and if so, if it is with a dirty line
2290 bool pending_dirty_resp = !pkt->sharedAsserted() &&
2291 pkt->memInhibitAsserted();
2293 cache.markInService(mshr, pending_dirty_resp);
2297 // if we succeeded and are not waiting for a retry, schedule the
2298 // next send considering when the next MSHR is ready, note that
2299 // snoop responses have their own packet queue and thus schedule
2301 if (!waitingOnRetry) {
2302 schedSendEvent(cache.nextMSHRReadyTime());
2306 template<class TagStore>
2308 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
2309 const std::string &_label)
2310 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2311 _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2312 _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2316 #endif//__MEM_CACHE_CACHE_IMPL_HH__