1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/misc.hh"
54 #include "base/range.hh"
55 #include "base/types.hh"
56 #include "debug/Cache.hh"
57 #include "debug/CachePort.hh"
58 #include "mem/cache/prefetch/base.hh"
59 #include "mem/cache/blk.hh"
60 #include "mem/cache/cache.hh"
61 #include "mem/cache/mshr.hh"
62 #include "sim/sim_exit.hh"
63
64 template<class TagStore>
65 Cache<TagStore>::Cache(const Params *p, TagStore *tags)
66 : BaseCache(p),
67 tags(tags),
68 prefetcher(p->prefetcher),
69 doFastWrites(true),
70 prefetchOnAccess(p->prefetch_on_access)
71 {
72 tempBlock = new BlkType();
73 tempBlock->data = new uint8_t[blkSize];
74
75 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
76 "CpuSidePort");
77 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
78 "MemSidePort");
79
80 tags->setCache(this);
81 if (prefetcher)
82 prefetcher->setCache(this);
83 }
84
85 template<class TagStore>
86 void
87 Cache<TagStore>::regStats()
88 {
89 BaseCache::regStats();
90 tags->regStats(name());
91 }
92
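// Perform the data manipulation for a SwapReq directly on the cached
// block: the packet's write value is swapped with the current block
// contents, and for a conditional swap the block is only overwritten
// when its current contents match the condition value in the request.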
93 template<class TagStore>
94 void
95 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
96 {
97 uint64_t overwrite_val;
98 bool overwrite_mem;
99 uint64_t condition_val64;
100 uint32_t condition_val32;
101
102 int offset = tags->extractBlkOffset(pkt->getAddr());
103 uint8_t *blk_data = blk->data + offset;
104
105 assert(sizeof(uint64_t) >= pkt->getSize());
106
107 overwrite_mem = true;
108 // keep a copy of our possible write value, and copy what is at the
109 // memory address into the packet
110 pkt->writeData((uint8_t *)&overwrite_val);
111 pkt->setData(blk_data);
112
113 if (pkt->req->isCondSwap()) {
114 if (pkt->getSize() == sizeof(uint64_t)) {
115 condition_val64 = pkt->req->getExtraData();
116 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
117 sizeof(uint64_t));
118 } else if (pkt->getSize() == sizeof(uint32_t)) {
119 condition_val32 = (uint32_t)pkt->req->getExtraData();
120 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
121 sizeof(uint32_t));
122 } else
123 panic("Invalid size for conditional read/write\n");
124 }
125
126 if (overwrite_mem) {
127 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
128 blk->status |= BlkDirty;
129 }
130 }
131
132
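// Complete a CPU-side request using a valid block: handle swaps,
// writes and reads, and for whole-block reads from upper-level caches
// decide whether to pass on exclusive/owned state or respond with a
// shared copy. Upgrades are acknowledged by invalidating our copy.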
133 template<class TagStore>
134 void
135 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
136 bool deferred_response,
137 bool pending_downgrade)
138 {
139 assert(blk && blk->isValid());
140 // Occasionally this is not true... if we are a lower-level cache
141 // satisfying a string of Read and ReadEx requests from
142 // upper-level caches, a Read will mark the block as shared but we
143 // can satisfy a following ReadEx anyway since we can rely on the
144 // Read requester(s) to have buffered the ReadEx snoop and to
145 // invalidate their blocks after receiving them.
146 // assert(!pkt->needsExclusive() || blk->isWritable());
147 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
148
149 // Check RMW operations first since both isRead() and
150 // isWrite() will be true for them
151 if (pkt->cmd == MemCmd::SwapReq) {
152 cmpAndSwap(blk, pkt);
153 } else if (pkt->isWrite()) {
154 if (blk->checkWrite(pkt)) {
155 pkt->writeDataToBlock(blk->data, blkSize);
156 blk->status |= BlkDirty;
157 }
158 } else if (pkt->isRead()) {
159 if (pkt->isLLSC()) {
160 blk->trackLoadLocked(pkt);
161 }
162 pkt->setDataFromBlock(blk->data, blkSize);
163 if (pkt->getSize() == blkSize) {
164 // special handling for coherent block requests from
165 // upper-level caches
166 if (pkt->needsExclusive()) {
167 // if we have a dirty copy, make sure the recipient
168 // keeps it marked dirty
169 if (blk->isDirty()) {
170 pkt->assertMemInhibit();
171 }
172 // on ReadExReq we give up our copy unconditionally
173 tags->invalidateBlk(blk);
174 } else if (blk->isWritable() && !pending_downgrade
175 && !pkt->sharedAsserted()) {
176 // we can give the requester an exclusive copy (by not
177 // asserting shared line) on a read request if:
178 // - we have an exclusive copy at this level (& below)
179 // - we don't have a pending snoop from below
180 // signaling another read request
181 // - no other cache above has a copy (otherwise it
182 //   would have asserted shared line on request)
183
184 if (blk->isDirty()) {
185 // special considerations if we're owner:
186 if (!deferred_response && !isTopLevel) {
187 // if we are responding immediately and can
188 // signal that we're transferring ownership
189 // along with exclusivity, do so
190 pkt->assertMemInhibit();
191 blk->status &= ~BlkDirty;
192 } else {
193 // if we're responding after our own miss,
194 // there's a window where the recipient didn't
195 // know it was getting ownership and may not
196 // have responded to snoops correctly, so we
197 // can't pass off ownership *or* exclusivity
198 pkt->assertShared();
199 }
200 }
201 } else {
202 // otherwise only respond with a shared copy
203 pkt->assertShared();
204 }
205 }
206 } else {
207 // Not a read or write... must be an upgrade. It's OK
208 // to just ack those as long as we have an exclusive
209 // copy at this level.
210 assert(pkt->isUpgrade());
211 tags->invalidateBlk(blk);
212 }
213 }
214
215
216 /////////////////////////////////////////////////////
217 //
218 // MSHR helper functions
219 //
220 /////////////////////////////////////////////////////
221
222
223 template<class TagStore>
224 void
225 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
226 {
227 markInServiceInternal(mshr, pkt);
228 #if 0
229 if (mshr->originalCmd == MemCmd::HardPFReq) {
230 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
231 name());
232 //Also clear pending if need be
233 if (!prefetcher->havePending())
234 {
235 deassertMemSideBusRequest(Request_PF);
236 }
237 }
238 #endif
239 }
240
241
242 template<class TagStore>
243 void
244 Cache<TagStore>::squash(int threadNum)
245 {
246 bool unblock = false;
247 BlockedCause cause = NUM_BLOCKED_CAUSES;
248
249 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
250 noTargetMSHR = NULL;
251 unblock = true;
252 cause = Blocked_NoTargets;
253 }
254 if (mshrQueue.isFull()) {
255 unblock = true;
256 cause = Blocked_NoMSHRs;
257 }
258 mshrQueue.squash(threadNum);
259 if (unblock && !mshrQueue.isFull()) {
260 clearBlocked(cause);
261 }
262 }
263
264 /////////////////////////////////////////////////////
265 //
266 // Access path: requests coming in from the CPU side
267 //
268 /////////////////////////////////////////////////////
269
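// Common lookup path used by both timing and atomic accesses: handle
// uncacheable requests, probe the tags, satisfy hits, and treat
// incoming writebacks as a special case that may allocate a block.
// Returns true if the request was satisfied in this cache; the block
// and latency are returned by reference, and any victims evicted
// along the way are appended to the writebacks list.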
270 template<class TagStore>
271 bool
272 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
273 int &lat, PacketList &writebacks)
274 {
275 if (pkt->req->isUncacheable()) {
276 if (pkt->req->isClearLL()) {
277 tags->clearLocks();
278 } else {
279 blk = tags->findBlock(pkt->getAddr());
280 if (blk != NULL) {
281 tags->invalidateBlk(blk);
282 }
283 }
284
285 blk = NULL;
286 lat = hitLatency;
287 return false;
288 }
289
290 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
291 blk = tags->accessBlock(pkt->getAddr(), lat, id);
292
293 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
294 pkt->req->isInstFetch() ? " (ifetch)" : "",
295 pkt->getAddr(), (blk) ? "hit" : "miss");
296
297 if (blk != NULL) {
298
299 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
300 // OK to satisfy access
301 incHitCount(pkt);
302 satisfyCpuSideRequest(pkt, blk);
303 return true;
304 }
305 }
306
307 // Can't satisfy access normally... either no block (blk == NULL)
308 // or have block but need exclusive & only have shared.
309
310 // Writeback handling is a special case. We can write the block
311 // into the cache without having a writeable copy (or any copy at
312 // all).
313 if (pkt->cmd == MemCmd::Writeback) {
314 assert(blkSize == pkt->getSize());
315 if (blk == NULL) {
316 // need to do a replacement
317 blk = allocateBlock(pkt->getAddr(), writebacks);
318 if (blk == NULL) {
319 // no replaceable block available, give up.
320 // writeback will be forwarded to next level.
321 incMissCount(pkt);
322 return false;
323 }
324 int id = pkt->req->masterId();
325 tags->insertBlock(pkt->getAddr(), blk, id);
326 blk->status = BlkValid | BlkReadable;
327 }
328 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
329 blk->status |= BlkDirty;
330 if (pkt->isSupplyExclusive()) {
331 blk->status |= BlkWritable;
332 }
333 // nothing else to do; writeback doesn't expect response
334 assert(!pkt->needsResponse());
335 incHitCount(pkt);
336 return true;
337 }
338
339 incMissCount(pkt);
340
341 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
342 // complete miss on store conditional... just give up now
343 pkt->req->setExtraData(0);
344 return true;
345 }
346
347 return false;
348 }
349
350
351 class ForwardResponseRecord : public Packet::SenderState
352 {
353 Packet::SenderState *prevSenderState;
354 PortID prevSrc;
355 #ifndef NDEBUG
356 BaseCache *cache;
357 #endif
358 public:
359 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
360 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
361 #ifndef NDEBUG
362 , cache(_cache)
363 #endif
364 {}
365 void restore(Packet *pkt, BaseCache *_cache)
366 {
367 assert(_cache == cache);
368 pkt->senderState = prevSenderState;
369 pkt->setDest(prevSrc);
370 }
371 };
372
373
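// Main entry point for timing-mode packets arriving on the CPU side:
// forward upper-level responses, honor mem-inhibited requests, handle
// uncacheable accesses, and otherwise either respond on a hit or
// allocate an MSHR / write-buffer entry on a miss. The prefetcher is
// notified and any resulting writebacks are queued at the end.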
374 template<class TagStore>
375 bool
376 Cache<TagStore>::timingAccess(PacketPtr pkt)
377 {
378 //@todo Add back in MemDebug Calls
379 // MemDebug::cacheAccess(pkt);
380
381 // we charge hitLatency for doing just about anything here
382 Tick time = curTick() + hitLatency;
383
384 if (pkt->isResponse()) {
385 // must be cache-to-cache response from upper to lower level
386 ForwardResponseRecord *rec =
387 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
388
389 if (rec == NULL) {
390 assert(pkt->cmd == MemCmd::HardPFResp);
391 // Check if it's a prefetch response and handle it. We shouldn't
392 // get any other kinds of responses without FRRs.
393 DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
394 pkt->getAddr());
395 handleResponse(pkt);
396 return true;
397 }
398
399 rec->restore(pkt, this);
400 delete rec;
401 memSidePort->respond(pkt, time);
402 return true;
403 }
404
405 assert(pkt->isRequest());
406
407 if (pkt->memInhibitAsserted()) {
408 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
409 pkt->getAddr());
410 assert(!pkt->req->isUncacheable());
411 // Special tweak for multilevel coherence: snoop downward here
412 // on invalidates since there may be other caches below here
413 // that have shared copies. Not necessary if we know that
414 // supplier had exclusive copy to begin with.
415 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
416 Packet *snoopPkt = new Packet(pkt, true); // clear flags
417 snoopPkt->setExpressSnoop();
418 snoopPkt->assertMemInhibit();
419 memSidePort->sendTimingReq(snoopPkt);
420 // main memory will delete snoopPkt
421 }
422 // since we're the official target but we aren't responding,
423 // delete the packet now.
424 delete pkt;
425 return true;
426 }
427
428 if (pkt->req->isUncacheable()) {
429 if (pkt->req->isClearLL()) {
430 tags->clearLocks();
431 } else {
432 BlkType *blk = tags->findBlock(pkt->getAddr());
433 if (blk != NULL) {
434 tags->invalidateBlk(blk);
435 }
436 }
437
438 // writes go in write buffer, reads use MSHR
439 if (pkt->isWrite() && !pkt->isRead()) {
440 allocateWriteBuffer(pkt, time, true);
441 } else {
442 allocateUncachedReadBuffer(pkt, time, true);
443 }
444 assert(pkt->needsResponse()); // else we should delete it here??
445 return true;
446 }
447
448 int lat = hitLatency;
449 BlkType *blk = NULL;
450 PacketList writebacks;
451
452 bool satisfied = access(pkt, blk, lat, writebacks);
453
454 #if 0
455 /** @todo make the fast write alloc (wh64) work with coherence. */
456
457 // If this is a block size write/hint (WH64) allocate the block here
458 // if the coherence protocol allows it.
459 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
460 (pkt->cmd == MemCmd::WriteReq
461 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
462 // not outstanding misses, can do this
463 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
464 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
465 if (outstanding_miss) {
466 warn("WriteInv doing a fast allocate "
467 "with an outstanding miss to the same address\n");
468 }
469 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
470 writebacks);
471 ++fastWrites;
472 }
473 }
474 #endif
475
476 // track time of availability of next prefetch, if any
477 Tick next_pf_time = 0;
478
479 bool needsResponse = pkt->needsResponse();
480
481 if (satisfied) {
482 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
483 if (blk)
484 blk->status &= ~BlkHWPrefetched;
485 next_pf_time = prefetcher->notify(pkt, time);
486 }
487
488 if (needsResponse) {
489 pkt->makeTimingResponse();
490 cpuSidePort->respond(pkt, curTick()+lat);
491 } else {
492 delete pkt;
493 }
494 } else {
495 // miss
496
497 Addr blk_addr = blockAlign(pkt->getAddr());
498 MSHR *mshr = mshrQueue.findMatch(blk_addr);
499
500 if (mshr) {
501 // MSHR hit
502 //@todo remove hw_pf here
503 assert(pkt->req->masterId() < system->maxMasters());
504 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
505 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
506 mshr->threadNum = -1;
507 }
508 mshr->allocateTarget(pkt, time, order++);
509 if (mshr->getNumTargets() == numTarget) {
510 noTargetMSHR = mshr;
511 setBlocked(Blocked_NoTargets);
512 // need to be careful with this... if this mshr isn't
513 // ready yet (i.e. time > curTick()), we don't want to
514 // move it ahead of mshrs that are ready
515 // mshrQueue.moveToFront(mshr);
516 }
517 } else {
518 // no MSHR
519 assert(pkt->req->masterId() < system->maxMasters());
520 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
521 // always mark as cache fill for now... if we implement
522 // no-write-allocate or bypass accesses this will have to
523 // be changed.
524 if (pkt->cmd == MemCmd::Writeback) {
525 allocateWriteBuffer(pkt, time, true);
526 } else {
527 if (blk && blk->isValid()) {
528 // If we have a write miss to a valid block, we
529 // need to mark the block non-readable. Otherwise
530 // if we allow reads while there's an outstanding
531 // write miss, the read could return stale data
532 // out of the cache block... a more aggressive
533 // system could detect the overlap (if any) and
534 // forward data out of the MSHRs, but we don't do
535 // that yet. Note that we do need to leave the
536 // block valid so that it stays in the cache, in
537 // case we get an upgrade response (and hence no
538 // new data) when the write miss completes.
539 // As long as CPUs do proper store/load forwarding
540 // internally, and have a sufficiently weak memory
541 // model, this is probably unnecessary, but at some
542 // point it must have seemed like we needed it...
543 assert(pkt->needsExclusive() && !blk->isWritable());
544 blk->status &= ~BlkReadable;
545 }
546
547 allocateMissBuffer(pkt, time, true);
548 }
549
550 if (prefetcher) {
551 next_pf_time = prefetcher->notify(pkt, time);
552 }
553 }
554 }
555
556 if (next_pf_time != 0)
557 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
558
559 // copy writebacks to write buffer
560 while (!writebacks.empty()) {
561 PacketPtr wbPkt = writebacks.front();
562 allocateWriteBuffer(wbPkt, time, true);
563 writebacks.pop_front();
564 }
565
566 return true;
567 }
568
569
570 // See comment in cache.hh.
571 template<class TagStore>
572 PacketPtr
573 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
574 bool needsExclusive)
575 {
576 bool blkValid = blk && blk->isValid();
577
578 if (cpu_pkt->req->isUncacheable()) {
579 //assert(blk == NULL);
580 return NULL;
581 }
582
583 if (!blkValid &&
584 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
585 // Writebacks that weren't allocated in access() and upgrades
586 // from upper-level caches that missed completely just go
587 // through.
588 return NULL;
589 }
590
591 assert(cpu_pkt->needsResponse());
592
593 MemCmd cmd;
594 // @TODO make useUpgrades a parameter.
595 // Note that ownership protocols require upgrade, otherwise a
596 // write miss on a shared owned block will generate a ReadExcl,
597 // which will clobber the owned copy.
598 const bool useUpgrades = true;
599 if (blkValid && useUpgrades) {
600 // only reason to be here is that blk is shared
601 // (read-only) and we need exclusive
602 assert(needsExclusive && !blk->isWritable());
603 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
604 } else {
605 // block is invalid
606 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
607 }
608 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
609
610 pkt->allocate();
611 return pkt;
612 }
613
614
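// Atomic-mode access: on a miss, send the appropriate request down
// the memory side atomically, fill the cache from the response if
// needed, and satisfy the original request. Returns the accumulated
// latency of the access.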
615 template<class TagStore>
616 Tick
617 Cache<TagStore>::atomicAccess(PacketPtr pkt)
618 {
619 int lat = hitLatency;
620
621 // @TODO: make this a parameter
622 bool last_level_cache = false;
623
624 if (pkt->memInhibitAsserted()) {
625 assert(!pkt->req->isUncacheable());
626 // have to invalidate ourselves and any lower caches even if
627 // upper cache will be responding
628 if (pkt->isInvalidate()) {
629 BlkType *blk = tags->findBlock(pkt->getAddr());
630 if (blk && blk->isValid()) {
631 tags->invalidateBlk(blk);
632 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
633 pkt->cmdString(), pkt->getAddr());
634 }
635 if (!last_level_cache) {
636 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
637 pkt->cmdString(), pkt->getAddr());
638 lat += memSidePort->sendAtomic(pkt);
639 }
640 } else {
641 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
642 pkt->cmdString(), pkt->getAddr());
643 }
644
645 return lat;
646 }
647
648 // should assert here that there are no outstanding MSHRs or
649 // writebacks... that would mean that someone used an atomic
650 // access in timing mode
651
652 BlkType *blk = NULL;
653 PacketList writebacks;
654
655 if (!access(pkt, blk, lat, writebacks)) {
656 // MISS
657 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
658
659 bool is_forward = (bus_pkt == NULL);
660
661 if (is_forward) {
662 // just forwarding the same request to the next level
663 // no local cache operation involved
664 bus_pkt = pkt;
665 }
666
667 DPRINTF(Cache, "Sending an atomic %s for %x\n",
668 bus_pkt->cmdString(), bus_pkt->getAddr());
669
670 #if TRACING_ON
671 CacheBlk::State old_state = blk ? blk->status : 0;
672 #endif
673
674 lat += memSidePort->sendAtomic(bus_pkt);
675
676 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
677 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
678
679 assert(!bus_pkt->wasNacked());
680
681 // If packet was a forward, the response (if any) is already
682 // in place in the bus_pkt == pkt structure, so we don't need
683 // to do anything. Otherwise, use the separate bus_pkt to
684 // generate response to pkt and then delete it.
685 if (!is_forward) {
686 if (pkt->needsResponse()) {
687 assert(bus_pkt->isResponse());
688 if (bus_pkt->isError()) {
689 pkt->makeAtomicResponse();
690 pkt->copyError(bus_pkt);
691 } else if (bus_pkt->isRead() ||
692 bus_pkt->cmd == MemCmd::UpgradeResp) {
693 // we're updating cache state to allow us to
694 // satisfy the upstream request from the cache
695 blk = handleFill(bus_pkt, blk, writebacks);
696 satisfyCpuSideRequest(pkt, blk);
697 } else {
698 // we're satisfying the upstream request without
699 // modifying cache state, e.g., a write-through
700 pkt->makeAtomicResponse();
701 }
702 }
703 delete bus_pkt;
704 }
705 }
706
707 // Note that we don't invoke the prefetcher at all in atomic mode.
708 // It's not clear how to do it properly, particularly for
709 // prefetchers that aggressively generate prefetch candidates and
710 // rely on bandwidth contention to throttle them; these will tend
711 // to pollute the cache in atomic mode since there is no bandwidth
712 // contention. If we ever do want to enable prefetching in atomic
713 // mode, though, this is the place to do it... see timingAccess()
714 // for an example (though we'd want to issue the prefetch(es)
715 // immediately rather than calling requestMemSideBus() as we do
716 // there).
717
718 // Handle writebacks if needed
719 while (!writebacks.empty()){
720 PacketPtr wbPkt = writebacks.front();
721 memSidePort->sendAtomic(wbPkt);
722 writebacks.pop_front();
723 delete wbPkt;
724 }
725
726 // We now have the block one way or another (hit or completed miss)
727
728 if (pkt->needsResponse()) {
729 pkt->makeAtomicResponse();
730 }
731
732 return lat;
733 }
734
735
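// Functional access: update the packet from our block data and from
// any queued packets in the MSHRs, write buffer and ports. The access
// is only considered complete here if dirty (owned) data or a queued
// packet satisfies it; otherwise it is forwarded towards memory (or,
// for snoops, towards the CPU side).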
736 template<class TagStore>
737 void
738 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
739 {
740 Addr blk_addr = blockAlign(pkt->getAddr());
741 BlkType *blk = tags->findBlock(pkt->getAddr());
742 MSHR *mshr = mshrQueue.findMatch(blk_addr);
743
744 pkt->pushLabel(name());
745
746 CacheBlkPrintWrapper cbpw(blk);
747
748 // Note that just because an L2/L3 has valid data doesn't mean an
749 // L1 doesn't have a more up-to-date modified copy that still
750 // needs to be found. As a result we always update the request if
751 // we have it, but only declare it satisfied if we are the owner.
752
753 // see if we have data at all (owned or otherwise)
754 bool have_data = blk && blk->isValid()
755 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
756
757 // data we have is dirty if marked as such or if valid & ownership
758 // pending due to outstanding UpgradeReq
759 bool have_dirty =
760 have_data && (blk->isDirty() ||
761 (mshr && mshr->inService && mshr->isPendingDirty()));
762
763 bool done = have_dirty
764 || cpuSidePort->checkFunctional(pkt)
765 || mshrQueue.checkFunctional(pkt, blk_addr)
766 || writeBuffer.checkFunctional(pkt, blk_addr)
767 || memSidePort->checkFunctional(pkt);
768
769 DPRINTF(Cache, "functional %s %x %s%s%s\n",
770 pkt->cmdString(), pkt->getAddr(),
771 (blk && blk->isValid()) ? "valid " : "",
772 have_data ? "data " : "", done ? "done " : "");
773
774 // We're leaving the cache, so pop cache->name() label
775 pkt->popLabel();
776
777 if (done) {
778 pkt->makeResponse();
779 } else {
780 // if it came as a request from the CPU side then make sure it
781 // continues towards the memory side
782 if (fromCpuSide) {
783 memSidePort->sendFunctional(pkt);
784 } else if (forwardSnoops && cpuSidePort->getMasterPort().isSnooping()) {
785 // if it came from the memory side, it must be a snoop request
786 // and we should only forward it if we are forwarding snoops
787 cpuSidePort->sendFunctionalSnoop(pkt);
788 }
789 }
790 }
791
792
793 /////////////////////////////////////////////////////
794 //
795 // Response handling: responses from the memory side
796 //
797 /////////////////////////////////////////////////////
798
799
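// Handle a response arriving from the memory side: update miss
// statistics, fill the block if this is a cache fill, service all of
// the MSHR's targets (CPU requests, prefetches, deferred snoops), and
// then either promote deferred targets or deallocate the MSHR. Any
// writebacks generated by the fill are queued at the end.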
800 template<class TagStore>
801 void
802 Cache<TagStore>::handleResponse(PacketPtr pkt)
803 {
804 Tick time = curTick() + hitLatency;
805 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
806 bool is_error = pkt->isError();
807
808 assert(mshr);
809
810 if (pkt->wasNacked()) {
811 //pkt->reinitFromRequest();
812 warn("NACKs from devices not connected to the same bus "
813 "not implemented\n");
814 return;
815 }
816 if (is_error) {
817 DPRINTF(Cache, "Cache received packet with error for address %x, "
818 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
819 }
820
821 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
822
823 MSHRQueue *mq = mshr->queue;
824 bool wasFull = mq->isFull();
825
826 if (mshr == noTargetMSHR) {
827 // we always clear at least one target
828 clearBlocked(Blocked_NoTargets);
829 noTargetMSHR = NULL;
830 }
831
832 // Initial target is used just for stats
833 MSHR::Target *initial_tgt = mshr->getTarget();
834 BlkType *blk = tags->findBlock(pkt->getAddr());
835 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
836 Tick miss_latency = curTick() - initial_tgt->recvTime;
837 PacketList writebacks;
838
839 if (pkt->req->isUncacheable()) {
840 assert(pkt->req->masterId() < system->maxMasters());
841 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
842 miss_latency;
843 } else {
844 assert(pkt->req->masterId() < system->maxMasters());
845 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
846 miss_latency;
847 }
848
849 bool is_fill = !mshr->isForward &&
850 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
851
852 if (is_fill && !is_error) {
853 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
854 pkt->getAddr());
855
856 // give mshr a chance to do some dirty work
857 mshr->handleFill(pkt, blk);
858
859 blk = handleFill(pkt, blk, writebacks);
860 assert(blk != NULL);
861 }
862
863 // First offset for critical word first calculations
864 int initial_offset = 0;
865
866 if (mshr->hasTargets()) {
867 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
868 }
869
870 while (mshr->hasTargets()) {
871 MSHR::Target *target = mshr->getTarget();
872
873 switch (target->source) {
874 case MSHR::Target::FromCPU:
875 Tick completion_time;
876 if (is_fill) {
877 satisfyCpuSideRequest(target->pkt, blk,
878 true, mshr->hasPostDowngrade());
879 // How many bytes past the first request is this one
880 int transfer_offset =
881 target->pkt->getOffset(blkSize) - initial_offset;
882 if (transfer_offset < 0) {
883 transfer_offset += blkSize;
884 }
885
886 // If critical word (no offset) return first word time
887 completion_time = tags->getHitLatency() +
888 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
889
890 assert(!target->pkt->req->isUncacheable());
891
892 assert(pkt->req->masterId() < system->maxMasters());
893 missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] +=
894 completion_time - target->recvTime;
895 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
896 // failed StoreCond upgrade
897 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
898 target->pkt->cmd == MemCmd::StoreCondFailReq ||
899 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
900 completion_time = tags->getHitLatency() + pkt->finishTime;
901 target->pkt->req->setExtraData(0);
902 } else {
903 // not a cache fill, just forwarding response
904 completion_time = tags->getHitLatency() + pkt->finishTime;
905 if (pkt->isRead() && !is_error) {
906 target->pkt->setData(pkt->getPtr<uint8_t>());
907 }
908 }
909 target->pkt->makeTimingResponse();
910 // if this packet is an error copy that to the new packet
911 if (is_error)
912 target->pkt->copyError(pkt);
913 if (target->pkt->cmd == MemCmd::ReadResp &&
914 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
915 // If intermediate cache got ReadRespWithInvalidate,
916 // propagate that. Response should not have
917 // isInvalidate() set otherwise.
918 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
919 }
920 cpuSidePort->respond(target->pkt, completion_time);
921 break;
922
923 case MSHR::Target::FromPrefetcher:
924 assert(target->pkt->cmd == MemCmd::HardPFReq);
925 if (blk)
926 blk->status |= BlkHWPrefetched;
927 delete target->pkt->req;
928 delete target->pkt;
929 break;
930
931 case MSHR::Target::FromSnoop:
932 // I don't believe that a snoop can be in an error state
933 assert(!is_error);
934 // response to snoop request
935 DPRINTF(Cache, "processing deferred snoop...\n");
936 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
937 handleSnoop(target->pkt, blk, true, true,
938 mshr->hasPostInvalidate());
939 break;
940
941 default:
942 panic("Illegal target->source enum %d\n", target->source);
943 }
944
945 mshr->popTarget();
946 }
947
948 if (blk) {
949 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
950 tags->invalidateBlk(blk);
951 } else if (mshr->hasPostDowngrade()) {
952 blk->status &= ~BlkWritable;
953 }
954 }
955
956 if (mshr->promoteDeferredTargets()) {
957 // avoid later read getting stale data while write miss is
958 // outstanding.. see comment in timingAccess()
959 if (blk) {
960 blk->status &= ~BlkReadable;
961 }
962 MSHRQueue *mq = mshr->queue;
963 mq->markPending(mshr);
964 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
965 } else {
966 mq->deallocate(mshr);
967 if (wasFull && !mq->isFull()) {
968 clearBlocked((BlockedCause)mq->index);
969 }
970 }
971
972 // copy writebacks to write buffer
973 while (!writebacks.empty()) {
974 PacketPtr wbPkt = writebacks.front();
975 allocateWriteBuffer(wbPkt, time, true);
976 writebacks.pop_front();
977 }
978 // if we used temp block, clear it out
979 if (blk == tempBlock) {
980 if (blk->isDirty()) {
981 allocateWriteBuffer(writebackBlk(blk), time, true);
982 }
983 blk->status &= ~BlkValid;
984 tags->invalidateBlk(blk);
985 }
986
987 delete pkt;
988 }
989
990
991
992
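// Create a writeback packet for a dirty block: copy out the data,
// mark the packet supply-exclusive if we held a writable copy, and
// clear the block's dirty bit.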
993 template<class TagStore>
994 PacketPtr
995 Cache<TagStore>::writebackBlk(BlkType *blk)
996 {
997 assert(blk && blk->isValid() && blk->isDirty());
998
999 writebacks[Request::wbMasterId]++;
1000
1001 Request *writebackReq =
1002 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1003 Request::wbMasterId);
1004 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback);
1005 if (blk->isWritable()) {
1006 writeback->setSupplyExclusive();
1007 }
1008 writeback->allocate();
1009 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1010
1011 blk->status &= ~BlkDirty;
1012 return writeback;
1013 }
1014
1015
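// Find a victim block for the given address. If the victim has an
// outstanding upgrade MSHR we give up and return NULL; otherwise a
// dirty victim is scheduled for writeback via the writebacks list.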
1016 template<class TagStore>
1017 typename Cache<TagStore>::BlkType*
1018 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1019 {
1020 BlkType *blk = tags->findVictim(addr, writebacks);
1021
1022 if (blk->isValid()) {
1023 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1024 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1025 if (repl_mshr) {
1026 // must be an outstanding upgrade request on block
1027 // we're about to replace...
1028 assert(!blk->isWritable());
1029 assert(repl_mshr->needsExclusive());
1030 // too hard to replace block with transient state
1031 // allocation failed, block not inserted
1032 return NULL;
1033 } else {
1034 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1035 repl_addr, addr,
1036 blk->isDirty() ? "writeback" : "clean");
1037
1038 if (blk->isDirty()) {
1039 // Save writeback packet for handling by caller
1040 writebacks.push_back(writebackBlk(blk));
1041 }
1042 }
1043 }
1044
1045 return blk;
1046 }
1047
1048
1049 // Note that the reason we return a list of writebacks rather than
1050 // inserting them directly in the write buffer is that this function
1051 // is called by both atomic and timing-mode accesses, and in atomic
1052 // mode we don't mess with the write buffer (we just perform the
1053 // writebacks atomically once the original request is complete).
1054 template<class TagStore>
1055 typename Cache<TagStore>::BlkType*
1056 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1057 PacketList &writebacks)
1058 {
1059 Addr addr = pkt->getAddr();
1060 #if TRACING_ON
1061 CacheBlk::State old_state = blk ? blk->status : 0;
1062 #endif
1063
1064 if (blk == NULL) {
1065 // better have read new data...
1066 assert(pkt->hasData());
1067 // need to do a replacement
1068 blk = allocateBlock(addr, writebacks);
1069 if (blk == NULL) {
1070 // No replaceable block... just use temporary storage to
1071 // complete the current request and then get rid of it
1072 assert(!tempBlock->isValid());
1073 blk = tempBlock;
1074 tempBlock->set = tags->extractSet(addr);
1075 tempBlock->tag = tags->extractTag(addr);
1076 DPRINTF(Cache, "using temp block for %x\n", addr);
1077 } else {
1078 int id = pkt->req->masterId();
1079 tags->insertBlock(pkt->getAddr(), blk, id);
1080 }
1081
1082 // starting from scratch with a new block
1083 blk->status = 0;
1084 } else {
1085 // existing block... probably an upgrade
1086 assert(blk->tag == tags->extractTag(addr));
1087 // either we're getting new data or the block should already be valid
1088 assert(pkt->hasData() || blk->isValid());
1089 // don't clear block status... if block is already dirty we
1090 // don't want to lose that
1091 }
1092
1093 blk->status |= BlkValid | BlkReadable;
1094
1095 if (!pkt->sharedAsserted()) {
1096 blk->status |= BlkWritable;
1097 // If we got this via cache-to-cache transfer (i.e., from a
1098 // cache that was an owner) and took away that owner's copy,
1099 // then we need to write it back. Normally this happens
1100 // anyway as a side effect of getting a copy to write it, but
1101 // there are cases (such as failed store conditionals or
1102 // compare-and-swaps) where we'll demand an exclusive copy but
1103 // end up not writing it.
1104 if (pkt->memInhibitAsserted())
1105 blk->status |= BlkDirty;
1106 }
1107
1108 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1109 addr, old_state, blk->status);
1110
1111 // if we got new data, copy it in
1112 if (pkt->isRead()) {
1113 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1114 }
1115
1116 blk->whenReady = pkt->finishTime;
1117
1118 return blk;
1119 }
1120
1121
1122 /////////////////////////////////////////////////////
1123 //
1124 // Snoop path: requests coming in from the memory side
1125 //
1126 /////////////////////////////////////////////////////
1127
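// Issue a timing-mode snoop response supplying our copy of the block,
// converting a ReadResp into a ReadRespWithInvalidate if an
// invalidation is still pending for the requester.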
1128 template<class TagStore>
1129 void
1130 Cache<TagStore>::
1131 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1132 bool already_copied, bool pending_inval)
1133 {
1134 // timing-mode snoop responses require a new packet, unless we
1135 // already made a copy...
1136 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1137 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1138 pkt->allocate();
1139 pkt->makeTimingResponse();
1140 if (pkt->isRead()) {
1141 pkt->setDataFromBlock(blk_data, blkSize);
1142 }
1143 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1144 // Assume we defer a response to a read from a far-away cache
1145 // A, then later defer a ReadExcl from a cache B on the same
1146 // bus as us. We'll assert MemInhibit in both cases, but in
1147 // the latter case MemInhibit will keep the invalidation from
1148 // reaching cache A. This special response tells cache A that
1149 // it gets the block to satisfy its read, but must immediately
1150 // invalidate it.
1151 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1152 }
1153 memSidePort->respond(pkt, curTick() + hitLatency);
1154 }
1155
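// Core snoop handling shared by the timing and atomic paths: first
// propagate the snoop to any caches above us, then update our own
// block state (dropping writable/dirty permissions as required) and
// respond with data if we hold a dirty copy. Invalidating snoops
// finally invalidate our block.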
1156 template<class TagStore>
1157 void
1158 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1159 bool is_timing, bool is_deferred,
1160 bool pending_inval)
1161 {
1162 // deferred snoops can only happen in timing mode
1163 assert(!(is_deferred && !is_timing));
1164 // pending_inval only makes sense on deferred snoops
1165 assert(!(pending_inval && !is_deferred));
1166 assert(pkt->isRequest());
1167
1168 // the packet may get modified if we or a forwarded snooper
1169 // responds in atomic mode, so remember a few things about the
1170 // original packet up front
1171 bool invalidate = pkt->isInvalidate();
1172 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1173
1174 if (forwardSnoops) {
1175 // first propagate snoop upward to see if anyone above us wants to
1176 // handle it. save & restore packet src since it will get
1177 // rewritten to be relative to cpu-side bus (if any)
1178 bool alreadyResponded = pkt->memInhibitAsserted();
1179 if (is_timing) {
1180 Packet snoopPkt(pkt, true); // clear flags
1181 snoopPkt.setExpressSnoop();
1182 snoopPkt.senderState = new ForwardResponseRecord(pkt, this);
1183 cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1184 if (snoopPkt.memInhibitAsserted()) {
1185 // cache-to-cache response from some upper cache
1186 assert(!alreadyResponded);
1187 pkt->assertMemInhibit();
1188 } else {
1189 delete snoopPkt.senderState;
1190 }
1191 if (snoopPkt.sharedAsserted()) {
1192 pkt->assertShared();
1193 }
1194 } else {
1195 cpuSidePort->sendAtomicSnoop(pkt);
1196 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1197 // cache-to-cache response from some upper cache:
1198 // forward response to original requester
1199 assert(pkt->isResponse());
1200 }
1201 }
1202 }
1203
1204 if (!blk || !blk->isValid()) {
1205 return;
1206 }
1207
1208 // we may end up modifying both the block state and the packet (if
1209 // we respond in atomic mode), so just figure out what to do now
1210 // and then do it later
1211 bool respond = blk->isDirty() && pkt->needsResponse();
1212 bool have_exclusive = blk->isWritable();
1213
1214 if (pkt->isRead() && !invalidate) {
1215 assert(!needs_exclusive);
1216 pkt->assertShared();
1217 int bits_to_clear = BlkWritable;
1218 const bool haveOwnershipState = true; // for now
1219 if (!haveOwnershipState) {
1220 // if we don't support pure ownership (dirty && !writable),
1221 // have to clear dirty bit here, assume memory snarfs data
1222 // on cache-to-cache xfer
1223 bits_to_clear |= BlkDirty;
1224 }
1225 blk->status &= ~bits_to_clear;
1226 }
1227
1228 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1229 pkt->cmdString(), blockAlign(pkt->getAddr()),
1230 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1231
1232 if (respond) {
1233 assert(!pkt->memInhibitAsserted());
1234 pkt->assertMemInhibit();
1235 if (have_exclusive) {
1236 pkt->setSupplyExclusive();
1237 }
1238 if (is_timing) {
1239 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1240 } else {
1241 pkt->makeAtomicResponse();
1242 pkt->setDataFromBlock(blk->data, blkSize);
1243 }
1244 } else if (is_timing && is_deferred) {
1245 // if it's a deferred timing snoop then we've made a copy of
1246 // the packet, and so if we're not using that copy to respond
1247 // then we need to delete it here.
1248 delete pkt;
1249 }
1250
1251 // Do this last in case it deallocates block data or something
1252 // like that
1253 if (invalidate) {
1254 tags->invalidateBlk(blk);
1255 }
1256 }
1257
1258
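// Timing-mode snoop entry point: let a matching in-service MSHR defer
// the snoop, check the write buffer for a pending writeback that can
// supply the data, and then perform the regular block snoop.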
1259 template<class TagStore>
1260 void
1261 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1262 {
1263 // Note that some deferred snoops don't have requests, since the
1264 // original access may have already completed
1265 if ((pkt->req && pkt->req->isUncacheable()) ||
1266 pkt->cmd == MemCmd::Writeback) {
1267 //Can't get a hit on an uncacheable address
1268 //Revisit this for multi level coherence
1269 return;
1270 }
1271
1272 BlkType *blk = tags->findBlock(pkt->getAddr());
1273
1274 Addr blk_addr = blockAlign(pkt->getAddr());
1275 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1276
1277 // Let the MSHR itself track the snoop and decide whether we want
1278 // to go ahead and do the regular cache snoop
1279 if (mshr && mshr->handleSnoop(pkt, order++)) {
1280 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1281 blk_addr);
1282 if (mshr->getNumTargets() > numTarget)
1283 warn("allocating bonus target for snoop"); //handle later
1284 return;
1285 }
1286
1287 //We also need to check the writeback buffers and handle those
1288 std::vector<MSHR *> writebacks;
1289 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1290 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1291 pkt->getAddr());
1292
1293 // Look through the writebacks for any cacheable writes and use that copy
1294 if (writebacks.size()) {
1295 // We should only ever find a single match
1296 assert(writebacks.size() == 1);
1297 mshr = writebacks[0];
1298 assert(!mshr->isUncacheable());
1299 assert(mshr->getNumTargets() == 1);
1300 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1301 assert(wb_pkt->cmd == MemCmd::Writeback);
1302
1303 assert(!pkt->memInhibitAsserted());
1304 pkt->assertMemInhibit();
1305 if (!pkt->needsExclusive()) {
1306 pkt->assertShared();
1307 // the writeback is no longer the exclusive copy in the system
1308 wb_pkt->clearSupplyExclusive();
1309 } else {
1310 // if we're not asserting the shared line, we need to
1311 // invalidate our copy. we'll do that below as long as
1312 // the packet's invalidate flag is set...
1313 assert(pkt->isInvalidate());
1314 }
1315 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1316 false, false);
1317
1318 if (pkt->isInvalidate()) {
1319 // Invalidation trumps our writeback... discard here
1320 markInService(mshr);
1321 delete wb_pkt;
1322 }
1323 } // writebacks.size()
1324 }
1325
1326 // If this was a shared writeback, there may still be
1327 // other shared copies above that require invalidation.
1328 // We could be more selective and return here if the
1329 // request is non-exclusive or if the writeback is
1330 // exclusive.
1331 handleSnoop(pkt, blk, true, false, false);
1332 }
1333
1334 template<class TagStore>
1335 bool
1336 Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
1337 {
1338 // Express snoop responses from master to slave, e.g., from L1 to L2
1339 cache->timingAccess(pkt);
1340 return true;
1341 }
1342
1343 template<class TagStore>
1344 Tick
1345 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1346 {
1347 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1348 // Can't get a hit on an uncacheable address
1349 // Revisit this for multi level coherence
1350 return hitLatency;
1351 }
1352
1353 BlkType *blk = tags->findBlock(pkt->getAddr());
1354 handleSnoop(pkt, blk, false, false, false);
1355 return hitLatency;
1356 }
1357
1358
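// Select the next MSHR to service, arbitrating between the miss queue
// and the write buffer while respecting ordering around conflicting
// addresses; if neither has anything pending, try to generate a
// prefetch instead.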
1359 template<class TagStore>
1360 MSHR *
1361 Cache<TagStore>::getNextMSHR()
1362 {
1363 // Check both MSHR queue and write buffer for potential requests
1364 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1365 MSHR *write_mshr = writeBuffer.getNextMSHR();
1366
1367 // Now figure out which one to send... some cases are easy
1368 if (miss_mshr && !write_mshr) {
1369 return miss_mshr;
1370 }
1371 if (write_mshr && !miss_mshr) {
1372 return write_mshr;
1373 }
1374
1375 if (miss_mshr && write_mshr) {
1376 // We have one of each... normally we favor the miss request
1377 // unless the write buffer is full
1378 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1379 // Write buffer is full, so we'd like to issue a write;
1380 // need to search MSHR queue for conflicting earlier miss.
1381 MSHR *conflict_mshr =
1382 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1383
1384 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1385 // Service misses in order until conflict is cleared.
1386 return conflict_mshr;
1387 }
1388
1389 // No conflicts; issue write
1390 return write_mshr;
1391 }
1392
1393 // Write buffer isn't full, but need to check it for
1394 // conflicting earlier writeback
1395 MSHR *conflict_mshr =
1396 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1397 if (conflict_mshr) {
1398 // not sure why we don't check order here... it was in the
1399 // original code but commented out.
1400
1401 // The only way this happens is if we are
1402 // doing a write and we didn't have permissions
1403 // then subsequently saw a writeback (owned got evicted)
1404 // We need to make sure to perform the writeback first
1405 // To preserve the dirty data, then we can issue the write
1406
1407 // should we return write_mshr here instead? I.e. do we
1408 // have to flush writes in order? I don't think so... not
1409 // for Alpha anyway. Maybe for x86?
1410 return conflict_mshr;
1411 }
1412
1413 // No conflicts; issue read
1414 return miss_mshr;
1415 }
1416
1417 // fall through... no pending requests. Try a prefetch.
1418 assert(!miss_mshr && !write_mshr);
1419 if (prefetcher && !mshrQueue.isFull()) {
1420 // If we have a miss queue slot, we can try a prefetch
1421 PacketPtr pkt = prefetcher->getPacket();
1422 if (pkt) {
1423 Addr pf_addr = blockAlign(pkt->getAddr());
1424 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1425 !writeBuffer.findMatch(pf_addr)) {
1426 // Update statistic on number of prefetches issued
1427 // (hwpf_mshr_misses)
1428 assert(pkt->req->masterId() < system->maxMasters());
1429 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1430 // Don't request bus, since we already have it
1431 return allocateMissBuffer(pkt, curTick(), false);
1432 } else {
1433 // free the request and packet
1434 delete pkt->req;
1435 delete pkt;
1436 }
1437 }
1438 }
1439
1440 return NULL;
1441 }
1442
1443
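// Build the packet to send on the memory side for the next ready
// MSHR: fail queued store-conditional upgrades that were invalidated,
// forward packets that need no local handling, snoop upward before
// issuing hardware prefetches, and otherwise generate the appropriate
// cache-fill request.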
1444 template<class TagStore>
1445 PacketPtr
1446 Cache<TagStore>::getTimingPacket()
1447 {
1448 MSHR *mshr = getNextMSHR();
1449
1450 if (mshr == NULL) {
1451 return NULL;
1452 }
1453
1454 // use request from 1st target
1455 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1456 PacketPtr pkt = NULL;
1457
1458 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1459 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1460 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1461 // in MSHR, so now that we are getting around to processing
1462 // it, just treat it as if we got a failure response
1463 pkt = new Packet(tgt_pkt);
1464 pkt->cmd = MemCmd::UpgradeFailResp;
1465 pkt->senderState = mshr;
1466 pkt->firstWordTime = pkt->finishTime = curTick();
1467 handleResponse(pkt);
1468 return NULL;
1469 } else if (mshr->isForwardNoResponse()) {
1470 // no response expected, just forward packet as it is
1471 assert(tags->findBlock(mshr->addr) == NULL);
1472 pkt = tgt_pkt;
1473 } else {
1474 BlkType *blk = tags->findBlock(mshr->addr);
1475
1476 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1477 // It might be possible for a writeback to arrive between
1478 // the time the prefetch is placed in the MSHRs and when
1479 // it's selected to send... if so, this assert will catch
1480 // that, and then we'll have to figure out what to do.
1481 assert(blk == NULL);
1482
1483 // We need to check the caches above us to verify that they don't have
1484 // a copy of this block in the dirty state at the moment. Without this
1485 // check we could get a stale copy from memory that might get used
1486 // in place of the dirty one.
1487 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1488 snoop_pkt->setExpressSnoop();
1489 snoop_pkt->senderState = mshr;
1490 cpuSidePort->sendTimingSnoopReq(snoop_pkt);
1491
1492 if (snoop_pkt->memInhibitAsserted()) {
1493 markInService(mshr, snoop_pkt);
1494 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1495 tgt_pkt->getAddr());
1496 delete snoop_pkt;
1497 return NULL;
1498 }
1499 delete snoop_pkt;
1500 }
1501
1502 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1503
1504 mshr->isForward = (pkt == NULL);
1505
1506 if (mshr->isForward) {
1507 // not a cache block request, but a response is expected
1508 // make copy of current packet to forward, keep current
1509 // copy for response handling
1510 pkt = new Packet(tgt_pkt);
1511 pkt->allocate();
1512 if (pkt->isWrite()) {
1513 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1514 }
1515 }
1516 }
1517
1518 assert(pkt != NULL);
1519 pkt->senderState = mshr;
1520 return pkt;
1521 }
1522
1523
1524 template<class TagStore>
1525 Tick
1526 Cache<TagStore>::nextMSHRReadyTime()
1527 {
1528 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1529 writeBuffer.nextMSHRReadyTime());
1530
1531 if (prefetcher) {
1532 nextReady = std::min(nextReady,
1533 prefetcher->nextPrefetchReadyTime());
1534 }
1535
1536 return nextReady;
1537 }
1538
1539 template<class TagStore>
1540 void
1541 Cache<TagStore>::serialize(std::ostream &os)
1542 {
1543 warn("*** Creating checkpoints with caches is not supported. ***\n");
1544 warn(" Remove any caches before taking checkpoints\n");
1545 warn(" This checkpoint will not restore correctly and dirty data in "
1546 "the cache will be lost!\n");
1547
1548 // Since we don't write the dirty data in the caches back to physical
1549 // memory, a checkpoint taken with caches present cannot be restored
1550 // correctly: any dirty data in the caches will be lost.
1551
1552 bool bad_checkpoint = true;
1553 SERIALIZE_SCALAR(bad_checkpoint);
1554 }
1555
1556 template<class TagStore>
1557 void
1558 Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
1559 {
1560 bool bad_checkpoint;
1561 UNSERIALIZE_SCALAR(bad_checkpoint);
1562 if (bad_checkpoint) {
1563 fatal("Restoring from checkpoints with caches is not supported in the "
1564 "classic memory system. Please remove any caches before taking "
1565 "checkpoints.\n");
1566 }
1567 }
1568
1569 ///////////////
1570 //
1571 // CpuSidePort
1572 //
1573 ///////////////
1574
1575 template<class TagStore>
1576 AddrRangeList
1577 Cache<TagStore>::CpuSidePort::getAddrRanges()
1578 {
1579 return cache->getAddrRanges();
1580 }
1581
1582 template<class TagStore>
1583 bool
1584 Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
1585 {
1586 // always let inhibited requests through even if blocked
1587 if (!pkt->memInhibitAsserted() && blocked) {
1588 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1589 mustSendRetry = true;
1590 return false;
1591 }
1592
1593 cache->timingAccess(pkt);
1594 return true;
1595 }
1596
1597 template<class TagStore>
1598 Tick
1599 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1600 {
1601 // atomic request
1602 return cache->atomicAccess(pkt);
1603 }
1604
1605 template<class TagStore>
1606 void
1607 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1608 {
1609 // functional request
1610 cache->functionalAccess(pkt, true);
1611 }
1612
1613 template<class TagStore>
1614 Cache<TagStore>::
1615 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1616 const std::string &_label)
1617 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
1618 {
1619 }
1620
1621 ///////////////
1622 //
1623 // MemSidePort
1624 //
1625 ///////////////
1626
1627 template<class TagStore>
1628 bool
1629 Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
1630 {
1631 // this needs to be fixed so that the cache updates the mshr and sends the
1632 // packet back out on the link, but it probably won't happen so until this
1633 // gets fixed, just panic when it does
1634 if (pkt->wasNacked())
1635 panic("Need to implement cache resending nacked packets!\n");
1636
1637 cache->handleResponse(pkt);
1638 return true;
1639 }
1640
1641 // Express snooping requests to memside port
1642 template<class TagStore>
1643 void
1644 Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
1645 {
1646 // handle snooping requests
1647 cache->snoopTiming(pkt);
1648 }
1649
1650 template<class TagStore>
1651 Tick
1652 Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
1653 {
1654 // atomic snoop
1655 return cache->snoopAtomic(pkt);
1656 }
1657
1658 template<class TagStore>
1659 void
1660 Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
1661 {
1662 // functional snoop (note that in contrast to atomic we don't have
1663 // a specific functionalSnoop method, as they have the same
1664 // behaviour regardless)
1665 cache->functionalAccess(pkt, false);
1666 }
1667
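// Send the next deferred packet on the memory side: queued responses
// take priority, otherwise ask the cache for the next MSHR-generated
// request, track whether we are waiting on a retry, and schedule the
// next send based on when the next MSHR becomes ready.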
1668 template<class TagStore>
1669 void
1670 Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
1671 {
1672 // if we have a response packet waiting we have to start with that
1673 if (deferredPacketReady()) {
1674 // use the normal approach from the timing port
1675 trySendTiming();
1676 } else {
1677 // check for request packets (requests & writebacks)
1678 PacketPtr pkt = cache.getTimingPacket();
1679 if (pkt == NULL) {
1680 // can happen if e.g. we attempt a writeback and fail, but
1681 // before the retry, the writeback is eliminated because
1682 // we snoop another cache's ReadEx.
1683 waitingOnRetry = false;
1684 } else {
1685 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1686
1687 waitingOnRetry = !masterPort.sendTimingReq(pkt);
1688
1689 if (waitingOnRetry) {
1690 DPRINTF(CachePort, "now waiting on a retry\n");
1691 if (!mshr->isForwardNoResponse()) {
1692 // we are awaiting a retry, but since this packet was
1693 // created locally we delete it now and will create a new
1694 // one when we get the opportunity
1695 delete pkt;
1696 }
1697 // note that we have now masked any requestBus and
1698 // schedSendEvent (we will wait for a retry before
1699 // doing anything), and this is so even if we do not
1700 // care about this packet and might override it before
1701 // it gets retried
1702 } else {
1703 cache.markInService(mshr, pkt);
1704 }
1705 }
1706 }
1707
1708 // if we succeeded and are not waiting for a retry, schedule the
1709 // next send, not only looking at the response transmit list, but
1710 // also considering when the next MSHR is ready
1711 if (!waitingOnRetry) {
1712 scheduleSend(cache.nextMSHRReadyTime());
1713 }
1714 }
1715
1716 template<class TagStore>
1717 Cache<TagStore>::
1718 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1719 const std::string &_label)
1720 : BaseCache::CacheMasterPort(_name, _cache, _queue),
1721 _queue(*_cache, *this, _label), cache(_cache)
1722 {
1723 }