MEM: Introduce the master/slave port sub-classes in C++
[gem5.git] / src / mem / cache / cache_impl.hh
1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "debug/CachePort.hh"
59 #include "mem/cache/prefetch/base.hh"
60 #include "mem/cache/blk.hh"
61 #include "mem/cache/cache.hh"
62 #include "mem/cache/mshr.hh"
63 #include "sim/sim_exit.hh"
64
65 template<class TagStore>
66 Cache<TagStore>::Cache(const Params *p, TagStore *tags)
67 : BaseCache(p),
68 tags(tags),
69 prefetcher(p->prefetcher),
70 doFastWrites(true),
71 prefetchOnAccess(p->prefetch_on_access)
72 {
73 tempBlock = new BlkType();
74 tempBlock->data = new uint8_t[blkSize];
75
76 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
77 "CpuSidePort");
78 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
79 "MemSidePort");
80
81 tags->setCache(this);
82 if (prefetcher)
83 prefetcher->setCache(this);
84 }
85
86 template<class TagStore>
87 void
88 Cache<TagStore>::regStats()
89 {
90 BaseCache::regStats();
91 tags->regStats(name());
92 }
93
94 template<class TagStore>
95 void
96 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
97 {
98 uint64_t overwrite_val;
99 bool overwrite_mem;
100 uint64_t condition_val64;
101 uint32_t condition_val32;
102
103 int offset = tags->extractBlkOffset(pkt->getAddr());
104 uint8_t *blk_data = blk->data + offset;
105
106 assert(sizeof(uint64_t) >= pkt->getSize());
107
108 overwrite_mem = true;
109 // keep a copy of our possible write value, and copy what is at the
110 // memory address into the packet
111 pkt->writeData((uint8_t *)&overwrite_val);
112 pkt->setData(blk_data);
113
114 if (pkt->req->isCondSwap()) {
115 if (pkt->getSize() == sizeof(uint64_t)) {
116 condition_val64 = pkt->req->getExtraData();
117 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
118 sizeof(uint64_t));
119 } else if (pkt->getSize() == sizeof(uint32_t)) {
120 condition_val32 = (uint32_t)pkt->req->getExtraData();
121 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
122 sizeof(uint32_t));
123 } else
124 panic("Invalid size for conditional read/write\n");
125 }
126
127 if (overwrite_mem) {
128 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
129 blk->status |= BlkDirty;
130 }
131 }
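// The conditional-swap semantics implemented by cmpAndSwap() above can
// be illustrated in isolation. This is a minimal, hypothetical sketch;
// cond_swap64 and the raw byte buffer stand in for the packet and block
// data, and nothing in the cache calls it.
#if 0
#include <cstdint>
#include <cstring>

// Return the old 64-bit value at mem; write new_val only if the current
// value equals cond_val (mirroring the req->isCondSwap() path above).
static uint64_t
cond_swap64(uint8_t *mem, uint64_t cond_val, uint64_t new_val)
{
    uint64_t old_val;
    std::memcpy(&old_val, mem, sizeof(old_val));
    if (std::memcmp(&cond_val, mem, sizeof(cond_val)) == 0)
        std::memcpy(mem, &new_val, sizeof(new_val));
    return old_val;
}
#endif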
132
133
134 template<class TagStore>
135 void
136 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
137 bool deferred_response,
138 bool pending_downgrade)
139 {
140 assert(blk && blk->isValid());
141 // Occasionally this is not true... if we are a lower-level cache
142 // satisfying a string of Read and ReadEx requests from
143 // upper-level caches, a Read will mark the block as shared but we
144 // can satisfy a following ReadEx anyway since we can rely on the
145 // Read requester(s) to have buffered the ReadEx snoop and to
146 // invalidate their blocks after receiving them.
147 // assert(!pkt->needsExclusive() || blk->isWritable());
148 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
149
150 // Check RMW operations first since both isRead() and
151 // isWrite() will be true for them
152 if (pkt->cmd == MemCmd::SwapReq) {
153 cmpAndSwap(blk, pkt);
154 } else if (pkt->isWrite()) {
155 if (blk->checkWrite(pkt)) {
156 pkt->writeDataToBlock(blk->data, blkSize);
157 blk->status |= BlkDirty;
158 }
159 } else if (pkt->isRead()) {
160 if (pkt->isLLSC()) {
161 blk->trackLoadLocked(pkt);
162 }
163 pkt->setDataFromBlock(blk->data, blkSize);
164 if (pkt->getSize() == blkSize) {
165 // special handling for coherent block requests from
166 // upper-level caches
167 if (pkt->needsExclusive()) {
168 // if we have a dirty copy, make sure the recipient
169 // keeps it marked dirty
170 if (blk->isDirty()) {
171 pkt->assertMemInhibit();
172 }
173 // on ReadExReq we give up our copy unconditionally
174 tags->invalidateBlk(blk);
175 } else if (blk->isWritable() && !pending_downgrade
176 && !pkt->sharedAsserted()) {
177 // we can give the requester an exclusive copy (by not
178 // asserting shared line) on a read request if:
179 // - we have an exclusive copy at this level (& below)
180 // - we don't have a pending snoop from below
181 // signaling another read request
182 // - no other cache above has a copy (otherwise it
183                 //   would have asserted shared line on request)
184
185 if (blk->isDirty()) {
186 // special considerations if we're owner:
187 if (!deferred_response && !isTopLevel) {
188 // if we are responding immediately and can
189 // signal that we're transferring ownership
190 // along with exclusivity, do so
191 pkt->assertMemInhibit();
192 blk->status &= ~BlkDirty;
193 } else {
194 // if we're responding after our own miss,
195 // there's a window where the recipient didn't
196 // know it was getting ownership and may not
197 // have responded to snoops correctly, so we
198 // can't pass off ownership *or* exclusivity
199 pkt->assertShared();
200 }
201 }
202 } else {
203 // otherwise only respond with a shared copy
204 pkt->assertShared();
205 }
206 }
207 } else {
208         // Not a read or write... must be an upgrade. It's OK
209 // to just ack those as long as we have an exclusive
210 // copy at this level.
211 assert(pkt->isUpgrade());
212 tags->invalidateBlk(blk);
213 }
214 }
215
216
217 /////////////////////////////////////////////////////
218 //
219 // MSHR helper functions
220 //
221 /////////////////////////////////////////////////////
222
223
224 template<class TagStore>
225 void
226 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
227 {
228 markInServiceInternal(mshr, pkt);
229 #if 0
230 if (mshr->originalCmd == MemCmd::HardPFReq) {
231 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
232 name());
233 //Also clear pending if need be
234 if (!prefetcher->havePending())
235 {
236 deassertMemSideBusRequest(Request_PF);
237 }
238 }
239 #endif
240 }
241
242
243 template<class TagStore>
244 void
245 Cache<TagStore>::squash(int threadNum)
246 {
247 bool unblock = false;
248 BlockedCause cause = NUM_BLOCKED_CAUSES;
249
250 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
251 noTargetMSHR = NULL;
252 unblock = true;
253 cause = Blocked_NoTargets;
254 }
255 if (mshrQueue.isFull()) {
256 unblock = true;
257 cause = Blocked_NoMSHRs;
258 }
259 mshrQueue.squash(threadNum);
260 if (unblock && !mshrQueue.isFull()) {
261 clearBlocked(cause);
262 }
263 }
264
265 /////////////////////////////////////////////////////
266 //
267 // Access path: requests coming in from the CPU side
268 //
269 /////////////////////////////////////////////////////
270
271 template<class TagStore>
272 bool
273 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
274 int &lat, PacketList &writebacks)
275 {
276 if (pkt->req->isUncacheable()) {
277 if (pkt->req->isClearLL()) {
278 tags->clearLocks();
279 } else {
280 blk = tags->findBlock(pkt->getAddr());
281 if (blk != NULL) {
282 tags->invalidateBlk(blk);
283 }
284 }
285
286 blk = NULL;
287 lat = hitLatency;
288 return false;
289 }
290
291 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
292 blk = tags->accessBlock(pkt->getAddr(), lat, id);
293
294 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
295 pkt->req->isInstFetch() ? " (ifetch)" : "",
296 pkt->getAddr(), (blk) ? "hit" : "miss");
297
298 if (blk != NULL) {
299
300 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
301 // OK to satisfy access
302 incHitCount(pkt);
303 satisfyCpuSideRequest(pkt, blk);
304 return true;
305 }
306 }
307
308 // Can't satisfy access normally... either no block (blk == NULL)
309 // or have block but need exclusive & only have shared.
310
311     // Writeback handling is a special case. We can write the block
312 // into the cache without having a writeable copy (or any copy at
313 // all).
314 if (pkt->cmd == MemCmd::Writeback) {
315 assert(blkSize == pkt->getSize());
316 if (blk == NULL) {
317 // need to do a replacement
318 blk = allocateBlock(pkt->getAddr(), writebacks);
319 if (blk == NULL) {
320 // no replaceable block available, give up.
321 // writeback will be forwarded to next level.
322 incMissCount(pkt);
323 return false;
324 }
325 int id = pkt->req->masterId();
326 tags->insertBlock(pkt->getAddr(), blk, id);
327 blk->status = BlkValid | BlkReadable;
328 }
329 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
330 blk->status |= BlkDirty;
331 if (pkt->isSupplyExclusive()) {
332 blk->status |= BlkWritable;
333 }
334 // nothing else to do; writeback doesn't expect response
335 assert(!pkt->needsResponse());
336 incHitCount(pkt);
337 return true;
338 }
339
340 incMissCount(pkt);
341
342 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
343 // complete miss on store conditional... just give up now
344 pkt->req->setExtraData(0);
345 return true;
346 }
347
348 return false;
349 }
350
351
352 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
353 {
354 Packet::SenderState *prevSenderState;
355 int prevSrc;
356 #ifndef NDEBUG
357 BaseCache *cache;
358 #endif
359 public:
360 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
361 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
362 #ifndef NDEBUG
363 , cache(_cache)
364 #endif
365 {}
366 void restore(Packet *pkt, BaseCache *_cache)
367 {
368 assert(_cache == cache);
369 pkt->senderState = prevSenderState;
370 pkt->setDest(prevSrc);
371 }
372 };
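// ForwardResponseRecord is used as a matched save/restore pair around a
// snoop forwarded up the cpu side. A condensed sketch of the two call
// sites (handleSnoop() further below saves the state, timingAccess()
// above restores it); surrounding declarations are omitted here.
#if 0
// forwarding the snoop upward: push our record onto the sender state
snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
cpuSidePort->sendTiming(snoopPkt);

// on the way back down in timingAccess(): pop the record and undo it
ForwardResponseRecord *rec =
    dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
rec->restore(pkt, this);
delete rec;
memSidePort->respond(pkt, time);
#endif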
373
374
375 template<class TagStore>
376 bool
377 Cache<TagStore>::timingAccess(PacketPtr pkt)
378 {
379 //@todo Add back in MemDebug Calls
380 // MemDebug::cacheAccess(pkt);
381
382 // we charge hitLatency for doing just about anything here
383 Tick time = curTick() + hitLatency;
384
385 if (pkt->isResponse()) {
386 // must be cache-to-cache response from upper to lower level
387 ForwardResponseRecord *rec =
388 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
389
390 if (rec == NULL) {
391 assert(pkt->cmd == MemCmd::HardPFResp);
392 // Check if it's a prefetch response and handle it. We shouldn't
393 // get any other kinds of responses without FRRs.
394 DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
395 pkt->getAddr());
396 handleResponse(pkt);
397 return true;
398 }
399
400 rec->restore(pkt, this);
401 delete rec;
402 memSidePort->respond(pkt, time);
403 return true;
404 }
405
406 assert(pkt->isRequest());
407
408 if (pkt->memInhibitAsserted()) {
409 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
410 pkt->getAddr());
411 assert(!pkt->req->isUncacheable());
412 // Special tweak for multilevel coherence: snoop downward here
413 // on invalidates since there may be other caches below here
414 // that have shared copies. Not necessary if we know that
415 // supplier had exclusive copy to begin with.
416 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
417 Packet *snoopPkt = new Packet(pkt, true); // clear flags
418 snoopPkt->setExpressSnoop();
419 snoopPkt->assertMemInhibit();
420 memSidePort->sendTiming(snoopPkt);
421 // main memory will delete snoopPkt
422 }
423 // since we're the official target but we aren't responding,
424 // delete the packet now.
425 delete pkt;
426 return true;
427 }
428
429 if (pkt->req->isUncacheable()) {
430 if (pkt->req->isClearLL()) {
431 tags->clearLocks();
432 } else {
433 BlkType *blk = tags->findBlock(pkt->getAddr());
434 if (blk != NULL) {
435 tags->invalidateBlk(blk);
436 }
437 }
438
439 // writes go in write buffer, reads use MSHR
440 if (pkt->isWrite() && !pkt->isRead()) {
441 allocateWriteBuffer(pkt, time, true);
442 } else {
443 allocateUncachedReadBuffer(pkt, time, true);
444 }
445 assert(pkt->needsResponse()); // else we should delete it here??
446 return true;
447 }
448
449 int lat = hitLatency;
450 BlkType *blk = NULL;
451 PacketList writebacks;
452
453 bool satisfied = access(pkt, blk, lat, writebacks);
454
455 #if 0
456 /** @todo make the fast write alloc (wh64) work with coherence. */
457
458 // If this is a block size write/hint (WH64) allocate the block here
459 // if the coherence protocol allows it.
460 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
461 (pkt->cmd == MemCmd::WriteReq
462 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
463         // no outstanding misses, can do this
464 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
465 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
466 if (outstanding_miss) {
467                 warn("WriteInv doing a fastallocate "
468                      "with an outstanding miss to the same address\n");
469 }
470 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
471 writebacks);
472 ++fastWrites;
473 }
474 }
475 #endif
476
477 // track time of availability of next prefetch, if any
478 Tick next_pf_time = 0;
479
480 bool needsResponse = pkt->needsResponse();
481
482 if (satisfied) {
483 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
484 if (blk)
485 blk->status &= ~BlkHWPrefetched;
486 next_pf_time = prefetcher->notify(pkt, time);
487 }
488
489 if (needsResponse) {
490 pkt->makeTimingResponse();
491 cpuSidePort->respond(pkt, curTick()+lat);
492 } else {
493 delete pkt;
494 }
495 } else {
496 // miss
497
498 Addr blk_addr = blockAlign(pkt->getAddr());
499 MSHR *mshr = mshrQueue.findMatch(blk_addr);
500
501 if (mshr) {
502 // MSHR hit
503 //@todo remove hw_pf here
504 assert(pkt->req->masterId() < system->maxMasters());
505 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
506 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
507 mshr->threadNum = -1;
508 }
509 mshr->allocateTarget(pkt, time, order++);
510 if (mshr->getNumTargets() == numTarget) {
511 noTargetMSHR = mshr;
512 setBlocked(Blocked_NoTargets);
513 // need to be careful with this... if this mshr isn't
514                 // ready yet (i.e. time > curTick()), we don't want to
515 // move it ahead of mshrs that are ready
516 // mshrQueue.moveToFront(mshr);
517 }
518 } else {
519 // no MSHR
520 assert(pkt->req->masterId() < system->maxMasters());
521 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
522 // always mark as cache fill for now... if we implement
523 // no-write-allocate or bypass accesses this will have to
524 // be changed.
525 if (pkt->cmd == MemCmd::Writeback) {
526 allocateWriteBuffer(pkt, time, true);
527 } else {
528 if (blk && blk->isValid()) {
529 // If we have a write miss to a valid block, we
530 // need to mark the block non-readable. Otherwise
531 // if we allow reads while there's an outstanding
532 // write miss, the read could return stale data
533 // out of the cache block... a more aggressive
534 // system could detect the overlap (if any) and
535 // forward data out of the MSHRs, but we don't do
536 // that yet. Note that we do need to leave the
537 // block valid so that it stays in the cache, in
538 // case we get an upgrade response (and hence no
539 // new data) when the write miss completes.
540 // As long as CPUs do proper store/load forwarding
541 // internally, and have a sufficiently weak memory
542 // model, this is probably unnecessary, but at some
543 // point it must have seemed like we needed it...
544 assert(pkt->needsExclusive() && !blk->isWritable());
545 blk->status &= ~BlkReadable;
546 }
547
548 allocateMissBuffer(pkt, time, true);
549 }
550
551 if (prefetcher) {
552 next_pf_time = prefetcher->notify(pkt, time);
553 }
554 }
555 }
556
557 if (next_pf_time != 0)
558 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
559
560 // copy writebacks to write buffer
561 while (!writebacks.empty()) {
562 PacketPtr wbPkt = writebacks.front();
563 allocateWriteBuffer(wbPkt, time, true);
564 writebacks.pop_front();
565 }
566
567 return true;
568 }
569
570
571 // See comment in cache.hh.
572 template<class TagStore>
573 PacketPtr
574 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
575 bool needsExclusive)
576 {
577 bool blkValid = blk && blk->isValid();
578
579 if (cpu_pkt->req->isUncacheable()) {
580 //assert(blk == NULL);
581 return NULL;
582 }
583
584 if (!blkValid &&
585 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
586 // Writebacks that weren't allocated in access() and upgrades
587 // from upper-level caches that missed completely just go
588 // through.
589 return NULL;
590 }
591
592 assert(cpu_pkt->needsResponse());
593
594 MemCmd cmd;
595 // @TODO make useUpgrades a parameter.
596 // Note that ownership protocols require upgrade, otherwise a
597 // write miss on a shared owned block will generate a ReadExcl,
598 // which will clobber the owned copy.
599 const bool useUpgrades = true;
600 if (blkValid && useUpgrades) {
601 // only reason to be here is that blk is shared
602 // (read-only) and we need exclusive
603 assert(needsExclusive && !blk->isWritable());
604 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
605 } else {
606 // block is invalid
607 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
608 }
609 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
610
611 pkt->allocate();
612 return pkt;
613 }
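// The command selection above reduces to a small decision table. A
// hedged sketch over the same three inputs (busCmdFor is a hypothetical
// helper, not used by getBusPacket()):
#if 0
static MemCmd::Command
busCmdFor(bool blk_valid, bool needs_exclusive, bool is_llsc)
{
    if (blk_valid) {
        // we hold a shared (read-only) copy and need to upgrade it
        return is_llsc ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
    }
    // no usable copy: fetch the block, exclusively if we intend to write
    return needs_exclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
}
#endif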
614
615
616 template<class TagStore>
617 Tick
618 Cache<TagStore>::atomicAccess(PacketPtr pkt)
619 {
620 int lat = hitLatency;
621
622 // @TODO: make this a parameter
623 bool last_level_cache = false;
624
625 if (pkt->memInhibitAsserted()) {
626 assert(!pkt->req->isUncacheable());
627 // have to invalidate ourselves and any lower caches even if
628 // upper cache will be responding
629 if (pkt->isInvalidate()) {
630 BlkType *blk = tags->findBlock(pkt->getAddr());
631 if (blk && blk->isValid()) {
632 tags->invalidateBlk(blk);
633 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
634 pkt->cmdString(), pkt->getAddr());
635 }
636 if (!last_level_cache) {
637 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
638 pkt->cmdString(), pkt->getAddr());
639 lat += memSidePort->sendAtomic(pkt);
640 }
641 } else {
642 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
643 pkt->cmdString(), pkt->getAddr());
644 }
645
646 return lat;
647 }
648
649 // should assert here that there are no outstanding MSHRs or
650 // writebacks... that would mean that someone used an atomic
651 // access in timing mode
652
653 BlkType *blk = NULL;
654 PacketList writebacks;
655
656 if (!access(pkt, blk, lat, writebacks)) {
657 // MISS
658 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
659
660 bool is_forward = (bus_pkt == NULL);
661
662 if (is_forward) {
663 // just forwarding the same request to the next level
664 // no local cache operation involved
665 bus_pkt = pkt;
666 }
667
668 DPRINTF(Cache, "Sending an atomic %s for %x\n",
669 bus_pkt->cmdString(), bus_pkt->getAddr());
670
671 #if TRACING_ON
672 CacheBlk::State old_state = blk ? blk->status : 0;
673 #endif
674
675 lat += memSidePort->sendAtomic(bus_pkt);
676
677 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
678 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
679
680 assert(!bus_pkt->wasNacked());
681
682 // If packet was a forward, the response (if any) is already
683 // in place in the bus_pkt == pkt structure, so we don't need
684 // to do anything. Otherwise, use the separate bus_pkt to
685 // generate response to pkt and then delete it.
686 if (!is_forward) {
687 if (pkt->needsResponse()) {
688 assert(bus_pkt->isResponse());
689 if (bus_pkt->isError()) {
690 pkt->makeAtomicResponse();
691 pkt->copyError(bus_pkt);
692 } else if (bus_pkt->isRead() ||
693 bus_pkt->cmd == MemCmd::UpgradeResp) {
694 // we're updating cache state to allow us to
695 // satisfy the upstream request from the cache
696 blk = handleFill(bus_pkt, blk, writebacks);
697 satisfyCpuSideRequest(pkt, blk);
698 } else {
699 // we're satisfying the upstream request without
700 // modifying cache state, e.g., a write-through
701 pkt->makeAtomicResponse();
702 }
703 }
704 delete bus_pkt;
705 }
706 }
707
708 // Note that we don't invoke the prefetcher at all in atomic mode.
709 // It's not clear how to do it properly, particularly for
710 // prefetchers that aggressively generate prefetch candidates and
711 // rely on bandwidth contention to throttle them; these will tend
712 // to pollute the cache in atomic mode since there is no bandwidth
713 // contention. If we ever do want to enable prefetching in atomic
714 // mode, though, this is the place to do it... see timingAccess()
715 // for an example (though we'd want to issue the prefetch(es)
716 // immediately rather than calling requestMemSideBus() as we do
717 // there).
718
719 // Handle writebacks if needed
720     while (!writebacks.empty()) {
721 PacketPtr wbPkt = writebacks.front();
722 memSidePort->sendAtomic(wbPkt);
723 writebacks.pop_front();
724 delete wbPkt;
725 }
726
727 // We now have the block one way or another (hit or completed miss)
728
729 if (pkt->needsResponse()) {
730 pkt->makeAtomicResponse();
731 }
732
733 return lat;
734 }
735
736
737 template<class TagStore>
738 void
739 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
740 {
741 Addr blk_addr = blockAlign(pkt->getAddr());
742 BlkType *blk = tags->findBlock(pkt->getAddr());
743 MSHR *mshr = mshrQueue.findMatch(blk_addr);
744
745 pkt->pushLabel(name());
746
747 CacheBlkPrintWrapper cbpw(blk);
748
749 // Note that just because an L2/L3 has valid data doesn't mean an
750 // L1 doesn't have a more up-to-date modified copy that still
751 // needs to be found. As a result we always update the request if
752 // we have it, but only declare it satisfied if we are the owner.
753
754 // see if we have data at all (owned or otherwise)
755 bool have_data = blk && blk->isValid()
756 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
757
758 // data we have is dirty if marked as such or if valid & ownership
759 // pending due to outstanding UpgradeReq
760 bool have_dirty =
761 have_data && (blk->isDirty() ||
762 (mshr && mshr->inService && mshr->isPendingDirty()));
763
764 bool done = have_dirty
765 || cpuSidePort->checkFunctional(pkt)
766 || mshrQueue.checkFunctional(pkt, blk_addr)
767 || writeBuffer.checkFunctional(pkt, blk_addr)
768 || memSidePort->checkFunctional(pkt);
769
770 DPRINTF(Cache, "functional %s %x %s%s%s\n",
771 pkt->cmdString(), pkt->getAddr(),
772 (blk && blk->isValid()) ? "valid " : "",
773 have_data ? "data " : "", done ? "done " : "");
774
775 // We're leaving the cache, so pop cache->name() label
776 pkt->popLabel();
777
778 if (done) {
779 pkt->makeResponse();
780 } else {
781 // if it came as a request from the CPU side then make sure it
782 // continues towards the memory side
783 if (fromCpuSide) {
784 memSidePort->sendFunctional(pkt);
785 } else if (forwardSnoops && cpuSidePort->getMasterPort().isSnooping()) {
786 // if it came from the memory side, it must be a snoop request
787 // and we should only forward it if we are forwarding snoops
788 cpuSidePort->sendFunctional(pkt);
789 }
790 }
791 }
792
793
794 /////////////////////////////////////////////////////
795 //
796 // Response handling: responses from the memory side
797 //
798 /////////////////////////////////////////////////////
799
800
801 template<class TagStore>
802 void
803 Cache<TagStore>::handleResponse(PacketPtr pkt)
804 {
805 Tick time = curTick() + hitLatency;
806 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
807 bool is_error = pkt->isError();
808
809 assert(mshr);
810
811 if (pkt->wasNacked()) {
812 //pkt->reinitFromRequest();
813 warn("NACKs from devices not connected to the same bus "
814 "not implemented\n");
815 return;
816 }
817 if (is_error) {
818 DPRINTF(Cache, "Cache received packet with error for address %x, "
819 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
820 }
821
822 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
823
824 MSHRQueue *mq = mshr->queue;
825 bool wasFull = mq->isFull();
826
827 if (mshr == noTargetMSHR) {
828 // we always clear at least one target
829 clearBlocked(Blocked_NoTargets);
830 noTargetMSHR = NULL;
831 }
832
833 // Initial target is used just for stats
834 MSHR::Target *initial_tgt = mshr->getTarget();
835 BlkType *blk = tags->findBlock(pkt->getAddr());
836 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
837 Tick miss_latency = curTick() - initial_tgt->recvTime;
838 PacketList writebacks;
839
840 if (pkt->req->isUncacheable()) {
841 assert(pkt->req->masterId() < system->maxMasters());
842 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
843 miss_latency;
844 } else {
845 assert(pkt->req->masterId() < system->maxMasters());
846 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
847 miss_latency;
848 }
849
850 bool is_fill = !mshr->isForward &&
851 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
852
853 if (is_fill && !is_error) {
854 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
855 pkt->getAddr());
856
857 // give mshr a chance to do some dirty work
858 mshr->handleFill(pkt, blk);
859
860 blk = handleFill(pkt, blk, writebacks);
861 assert(blk != NULL);
862 }
863
864 // First offset for critical word first calculations
865 int initial_offset = 0;
866
867 if (mshr->hasTargets()) {
868 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
869 }
870
871 while (mshr->hasTargets()) {
872 MSHR::Target *target = mshr->getTarget();
873
874 switch (target->source) {
875 case MSHR::Target::FromCPU:
876 Tick completion_time;
877 if (is_fill) {
878 satisfyCpuSideRequest(target->pkt, blk,
879 true, mshr->hasPostDowngrade());
880 // How many bytes past the first request is this one
881 int transfer_offset =
882 target->pkt->getOffset(blkSize) - initial_offset;
883 if (transfer_offset < 0) {
884 transfer_offset += blkSize;
885 }
886
887 // If critical word (no offset) return first word time
888 completion_time = tags->getHitLatency() +
889 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
890
891 assert(!target->pkt->req->isUncacheable());
892
893 assert(pkt->req->masterId() < system->maxMasters());
894 missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] +=
895 completion_time - target->recvTime;
896 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
897 // failed StoreCond upgrade
898 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
899 target->pkt->cmd == MemCmd::StoreCondFailReq ||
900 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
901 completion_time = tags->getHitLatency() + pkt->finishTime;
902 target->pkt->req->setExtraData(0);
903 } else {
904 // not a cache fill, just forwarding response
905 completion_time = tags->getHitLatency() + pkt->finishTime;
906 if (pkt->isRead() && !is_error) {
907 target->pkt->setData(pkt->getPtr<uint8_t>());
908 }
909 }
910 target->pkt->makeTimingResponse();
911 // if this packet is an error copy that to the new packet
912 if (is_error)
913 target->pkt->copyError(pkt);
914 if (target->pkt->cmd == MemCmd::ReadResp &&
915 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
916 // If intermediate cache got ReadRespWithInvalidate,
917 // propagate that. Response should not have
918 // isInvalidate() set otherwise.
919 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
920 }
921 cpuSidePort->respond(target->pkt, completion_time);
922 break;
923
924 case MSHR::Target::FromPrefetcher:
925 assert(target->pkt->cmd == MemCmd::HardPFReq);
926 if (blk)
927 blk->status |= BlkHWPrefetched;
928 delete target->pkt->req;
929 delete target->pkt;
930 break;
931
932 case MSHR::Target::FromSnoop:
933 // I don't believe that a snoop can be in an error state
934 assert(!is_error);
935 // response to snoop request
936 DPRINTF(Cache, "processing deferred snoop...\n");
937 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
938 handleSnoop(target->pkt, blk, true, true,
939 mshr->hasPostInvalidate());
940 break;
941
942 default:
943 panic("Illegal target->source enum %d\n", target->source);
944 }
945
946 mshr->popTarget();
947 }
948
949 if (blk) {
950 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
951 tags->invalidateBlk(blk);
952 } else if (mshr->hasPostDowngrade()) {
953 blk->status &= ~BlkWritable;
954 }
955 }
956
957 if (mshr->promoteDeferredTargets()) {
958 // avoid later read getting stale data while write miss is
959 // outstanding.. see comment in timingAccess()
960 if (blk) {
961 blk->status &= ~BlkReadable;
962 }
963 MSHRQueue *mq = mshr->queue;
964 mq->markPending(mshr);
965 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
966 } else {
967 mq->deallocate(mshr);
968 if (wasFull && !mq->isFull()) {
969 clearBlocked((BlockedCause)mq->index);
970 }
971 }
972
973 // copy writebacks to write buffer
974 while (!writebacks.empty()) {
975 PacketPtr wbPkt = writebacks.front();
976 allocateWriteBuffer(wbPkt, time, true);
977 writebacks.pop_front();
978 }
979 // if we used temp block, clear it out
980 if (blk == tempBlock) {
981 if (blk->isDirty()) {
982 allocateWriteBuffer(writebackBlk(blk), time, true);
983 }
984 blk->status &= ~BlkValid;
985 tags->invalidateBlk(blk);
986 }
987
988 delete pkt;
989 }
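// The critical-word-first completion time used in the FromCPU case
// above can be viewed in isolation. A minimal sketch under the same
// assumptions (all offsets lie within one block of blk_size bytes;
// cwfCompletionTime is a hypothetical helper, not called here):
#if 0
static Tick
cwfCompletionTime(int tgt_offset, int initial_offset, int blk_size,
                  Tick hit_lat, Tick first_word_time, Tick finish_time)
{
    // how many bytes past the first (critical) request this target is
    int transfer_offset = tgt_offset - initial_offset;
    if (transfer_offset < 0)
        transfer_offset += blk_size;

    // the critical word completes when the first word arrives;
    // everything else has to wait for the whole block
    return hit_lat + (transfer_offset ? finish_time : first_word_time);
}
#endif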
990
991
992
993
994 template<class TagStore>
995 PacketPtr
996 Cache<TagStore>::writebackBlk(BlkType *blk)
997 {
998 assert(blk && blk->isValid() && blk->isDirty());
999
1000 writebacks[Request::wbMasterId]++;
1001
1002 Request *writebackReq =
1003 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1004 Request::wbMasterId);
1005 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
1006 if (blk->isWritable()) {
1007 writeback->setSupplyExclusive();
1008 }
1009 writeback->allocate();
1010 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1011
1012 blk->status &= ~BlkDirty;
1013 return writeback;
1014 }
1015
1016
1017 template<class TagStore>
1018 typename Cache<TagStore>::BlkType*
1019 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1020 {
1021 BlkType *blk = tags->findVictim(addr, writebacks);
1022
1023 if (blk->isValid()) {
1024 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1025 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1026 if (repl_mshr) {
1027 // must be an outstanding upgrade request on block
1028 // we're about to replace...
1029 assert(!blk->isWritable());
1030 assert(repl_mshr->needsExclusive());
1031 // too hard to replace block with transient state
1032 // allocation failed, block not inserted
1033 return NULL;
1034 } else {
1035 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1036 repl_addr, addr,
1037 blk->isDirty() ? "writeback" : "clean");
1038
1039 if (blk->isDirty()) {
1040 // Save writeback packet for handling by caller
1041 writebacks.push_back(writebackBlk(blk));
1042 }
1043 }
1044 }
1045
1046 return blk;
1047 }
1048
1049
1050 // Note that the reason we return a list of writebacks rather than
1051 // inserting them directly in the write buffer is that this function
1052 // is called by both atomic and timing-mode accesses, and in atomic
1053 // mode we don't mess with the write buffer (we just perform the
1054 // writebacks atomically once the original request is complete).
1055 template<class TagStore>
1056 typename Cache<TagStore>::BlkType*
1057 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1058 PacketList &writebacks)
1059 {
1060 Addr addr = pkt->getAddr();
1061 #if TRACING_ON
1062 CacheBlk::State old_state = blk ? blk->status : 0;
1063 #endif
1064
1065 if (blk == NULL) {
1066 // better have read new data...
1067 assert(pkt->hasData());
1068 // need to do a replacement
1069 blk = allocateBlock(addr, writebacks);
1070 if (blk == NULL) {
1071 // No replaceable block... just use temporary storage to
1072 // complete the current request and then get rid of it
1073 assert(!tempBlock->isValid());
1074 blk = tempBlock;
1075 tempBlock->set = tags->extractSet(addr);
1076 tempBlock->tag = tags->extractTag(addr);
1077 DPRINTF(Cache, "using temp block for %x\n", addr);
1078 } else {
1079 int id = pkt->req->masterId();
1080 tags->insertBlock(pkt->getAddr(), blk, id);
1081 }
1082
1083 // starting from scratch with a new block
1084 blk->status = 0;
1085 } else {
1086 // existing block... probably an upgrade
1087 assert(blk->tag == tags->extractTag(addr));
1088 // either we're getting new data or the block should already be valid
1089 assert(pkt->hasData() || blk->isValid());
1090 // don't clear block status... if block is already dirty we
1091 // don't want to lose that
1092 }
1093
1094 blk->status |= BlkValid | BlkReadable;
1095
1096 if (!pkt->sharedAsserted()) {
1097 blk->status |= BlkWritable;
1098 // If we got this via cache-to-cache transfer (i.e., from a
1099 // cache that was an owner) and took away that owner's copy,
1100 // then we need to write it back. Normally this happens
1101 // anyway as a side effect of getting a copy to write it, but
1102 // there are cases (such as failed store conditionals or
1103 // compare-and-swaps) where we'll demand an exclusive copy but
1104 // end up not writing it.
1105 if (pkt->memInhibitAsserted())
1106 blk->status |= BlkDirty;
1107 }
1108
1109 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1110 addr, old_state, blk->status);
1111
1112 // if we got new data, copy it in
1113 if (pkt->isRead()) {
1114 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1115 }
1116
1117 blk->whenReady = pkt->finishTime;
1118
1119 return blk;
1120 }
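// The status bits assigned on a fill above can be summarised on their
// own. A hedged sketch using the same CacheBlk status flags (fillState
// is a hypothetical helper, not called by handleFill()):
#if 0
static CacheBlk::State
fillState(bool shared_asserted, bool mem_inhibit_asserted)
{
    CacheBlk::State state = BlkValid | BlkReadable;
    if (!shared_asserted) {
        // nobody else asserted the shared line: we get an exclusive copy
        state |= BlkWritable;
        if (mem_inhibit_asserted) {
            // a former owner supplied the data, so we inherit the dirty copy
            state |= BlkDirty;
        }
    }
    return state;
}
#endif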
1121
1122
1123 /////////////////////////////////////////////////////
1124 //
1125 // Snoop path: requests coming in from the memory side
1126 //
1127 /////////////////////////////////////////////////////
1128
1129 template<class TagStore>
1130 void
1131 Cache<TagStore>::
1132 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1133 bool already_copied, bool pending_inval)
1134 {
1135 // timing-mode snoop responses require a new packet, unless we
1136 // already made a copy...
1137 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1138 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1139 pkt->allocate();
1140 pkt->makeTimingResponse();
1141 if (pkt->isRead()) {
1142 pkt->setDataFromBlock(blk_data, blkSize);
1143 }
1144 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1145 // Assume we defer a response to a read from a far-away cache
1146 // A, then later defer a ReadExcl from a cache B on the same
1147 // bus as us. We'll assert MemInhibit in both cases, but in
1148 // the latter case MemInhibit will keep the invalidation from
1149 // reaching cache A. This special response tells cache A that
1150 // it gets the block to satisfy its read, but must immediately
1151 // invalidate it.
1152 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1153 }
1154 memSidePort->respond(pkt, curTick() + hitLatency);
1155 }
1156
1157 template<class TagStore>
1158 void
1159 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1160 bool is_timing, bool is_deferred,
1161 bool pending_inval)
1162 {
1163 // deferred snoops can only happen in timing mode
1164 assert(!(is_deferred && !is_timing));
1165 // pending_inval only makes sense on deferred snoops
1166 assert(!(pending_inval && !is_deferred));
1167 assert(pkt->isRequest());
1168
1169 // the packet may get modified if we or a forwarded snooper
1170 // responds in atomic mode, so remember a few things about the
1171 // original packet up front
1172 bool invalidate = pkt->isInvalidate();
1173 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1174
1175 if (forwardSnoops) {
1176 // first propagate snoop upward to see if anyone above us wants to
1177 // handle it. save & restore packet src since it will get
1178 // rewritten to be relative to cpu-side bus (if any)
1179 bool alreadyResponded = pkt->memInhibitAsserted();
1180 if (is_timing) {
1181 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1182 snoopPkt->setExpressSnoop();
1183 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1184 cpuSidePort->sendTiming(snoopPkt);
1185 if (snoopPkt->memInhibitAsserted()) {
1186 // cache-to-cache response from some upper cache
1187 assert(!alreadyResponded);
1188 pkt->assertMemInhibit();
1189 } else {
1190 delete snoopPkt->senderState;
1191 }
1192 if (snoopPkt->sharedAsserted()) {
1193 pkt->assertShared();
1194 }
1195 delete snoopPkt;
1196 } else {
1197 int origSrc = pkt->getSrc();
1198 cpuSidePort->sendAtomic(pkt);
1199 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1200 // cache-to-cache response from some upper cache:
1201 // forward response to original requester
1202 assert(pkt->isResponse());
1203 }
1204 pkt->setSrc(origSrc);
1205 }
1206 }
1207
1208 if (!blk || !blk->isValid()) {
1209 return;
1210 }
1211
1212 // we may end up modifying both the block state and the packet (if
1213 // we respond in atomic mode), so just figure out what to do now
1214 // and then do it later
1215 bool respond = blk->isDirty() && pkt->needsResponse();
1216 bool have_exclusive = blk->isWritable();
1217
1218 if (pkt->isRead() && !invalidate) {
1219 assert(!needs_exclusive);
1220 pkt->assertShared();
1221 int bits_to_clear = BlkWritable;
1222 const bool haveOwnershipState = true; // for now
1223 if (!haveOwnershipState) {
1224 // if we don't support pure ownership (dirty && !writable),
1225 // have to clear dirty bit here, assume memory snarfs data
1226 // on cache-to-cache xfer
1227 bits_to_clear |= BlkDirty;
1228 }
1229 blk->status &= ~bits_to_clear;
1230 }
1231
1232 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1233 pkt->cmdString(), blockAlign(pkt->getAddr()),
1234 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1235
1236 if (respond) {
1237 assert(!pkt->memInhibitAsserted());
1238 pkt->assertMemInhibit();
1239 if (have_exclusive) {
1240 pkt->setSupplyExclusive();
1241 }
1242 if (is_timing) {
1243 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1244 } else {
1245 pkt->makeAtomicResponse();
1246 pkt->setDataFromBlock(blk->data, blkSize);
1247 }
1248 } else if (is_timing && is_deferred) {
1249 // if it's a deferred timing snoop then we've made a copy of
1250 // the packet, and so if we're not using that copy to respond
1251 // then we need to delete it here.
1252 delete pkt;
1253 }
1254
1255 // Do this last in case it deallocates block data or something
1256 // like that
1257 if (invalidate) {
1258 tags->invalidateBlk(blk);
1259 }
1260 }
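// The per-block effect of a snoop decided above can be condensed into a
// small helper. A hedged sketch over the same inputs (snoopOutcome is a
// hypothetical helper, not used by handleSnoop()):
#if 0
struct SnoopOutcome
{
    bool respond;        // supply the data and assert mem-inhibit
    bool clear_writable; // downgrade our copy to shared
    bool invalidate;     // drop our copy entirely (done last)
};

static SnoopOutcome
snoopOutcome(bool blk_dirty, bool needs_response, bool is_read,
             bool is_invalidate)
{
    SnoopOutcome o;
    o.respond = blk_dirty && needs_response;
    o.clear_writable = is_read && !is_invalidate;
    o.invalidate = is_invalidate;
    return o;
}
#endif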
1261
1262
1263 template<class TagStore>
1264 void
1265 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1266 {
1267 // Note that some deferred snoops don't have requests, since the
1268 // original access may have already completed
1269 if ((pkt->req && pkt->req->isUncacheable()) ||
1270 pkt->cmd == MemCmd::Writeback) {
1271         // Can't get a hit on an uncacheable address
1272         // Revisit this for multi-level coherence
1273 return;
1274 }
1275
1276 BlkType *blk = tags->findBlock(pkt->getAddr());
1277
1278 Addr blk_addr = blockAlign(pkt->getAddr());
1279 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1280
1281 // Let the MSHR itself track the snoop and decide whether we want
1282 // to go ahead and do the regular cache snoop
1283 if (mshr && mshr->handleSnoop(pkt, order++)) {
1284 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1285 blk_addr);
1286 if (mshr->getNumTargets() > numTarget)
1287             warn("allocating bonus target for snoop"); // handle later
1288 return;
1289 }
1290
1291     // We also need to check the writeback buffers and handle those
1292 std::vector<MSHR *> writebacks;
1293 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1294 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1295 pkt->getAddr());
1296
1297         // Look through writebacks for any non-uncacheable writes, use that
1298 for (int i = 0; i < writebacks.size(); i++) {
1299 mshr = writebacks[i];
1300 assert(!mshr->isUncacheable());
1301 assert(mshr->getNumTargets() == 1);
1302 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1303 assert(wb_pkt->cmd == MemCmd::Writeback);
1304
1305 assert(!pkt->memInhibitAsserted());
1306 pkt->assertMemInhibit();
1307 if (!pkt->needsExclusive()) {
1308 pkt->assertShared();
1309 // the writeback is no longer the exclusive copy in the system
1310 wb_pkt->clearSupplyExclusive();
1311 } else {
1312 // if we're not asserting the shared line, we need to
1313 // invalidate our copy. we'll do that below as long as
1314 // the packet's invalidate flag is set...
1315 assert(pkt->isInvalidate());
1316 }
1317 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1318 false, false);
1319
1320 if (pkt->isInvalidate()) {
1321 // Invalidation trumps our writeback... discard here
1322 markInService(mshr);
1323 delete wb_pkt;
1324 }
1325
1326 // If this was a shared writeback, there may still be
1327 // other shared copies above that require invalidation.
1328 // We could be more selective and return here if the
1329 // request is non-exclusive or if the writeback is
1330 // exclusive.
1331 break;
1332 }
1333 }
1334
1335 handleSnoop(pkt, blk, true, false, false);
1336 }
1337
1338
1339 template<class TagStore>
1340 Tick
1341 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1342 {
1343 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1344 // Can't get a hit on an uncacheable address
1345         // Revisit this for multi-level coherence
1346 return hitLatency;
1347 }
1348
1349 BlkType *blk = tags->findBlock(pkt->getAddr());
1350 handleSnoop(pkt, blk, false, false, false);
1351 return hitLatency;
1352 }
1353
1354
1355 template<class TagStore>
1356 MSHR *
1357 Cache<TagStore>::getNextMSHR()
1358 {
1359 // Check both MSHR queue and write buffer for potential requests
1360 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1361 MSHR *write_mshr = writeBuffer.getNextMSHR();
1362
1363 // Now figure out which one to send... some cases are easy
1364 if (miss_mshr && !write_mshr) {
1365 return miss_mshr;
1366 }
1367 if (write_mshr && !miss_mshr) {
1368 return write_mshr;
1369 }
1370
1371 if (miss_mshr && write_mshr) {
1372 // We have one of each... normally we favor the miss request
1373 // unless the write buffer is full
1374 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1375 // Write buffer is full, so we'd like to issue a write;
1376 // need to search MSHR queue for conflicting earlier miss.
1377 MSHR *conflict_mshr =
1378 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1379
1380 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1381 // Service misses in order until conflict is cleared.
1382 return conflict_mshr;
1383 }
1384
1385 // No conflicts; issue write
1386 return write_mshr;
1387 }
1388
1389 // Write buffer isn't full, but need to check it for
1390 // conflicting earlier writeback
1391 MSHR *conflict_mshr =
1392 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1393 if (conflict_mshr) {
1394 // not sure why we don't check order here... it was in the
1395 // original code but commented out.
1396
1397             // The only way this happens is if we are
1398             // doing a write and we didn't have permissions,
1399             // then subsequently saw a writeback (the owned copy got evicted).
1400             // We need to make sure to perform the writeback first
1401             // to preserve the dirty data, then we can issue the write.
1402
1403 // should we return write_mshr here instead? I.e. do we
1404 // have to flush writes in order? I don't think so... not
1405 // for Alpha anyway. Maybe for x86?
1406 return conflict_mshr;
1407 }
1408
1409 // No conflicts; issue read
1410 return miss_mshr;
1411 }
1412
1413 // fall through... no pending requests. Try a prefetch.
1414 assert(!miss_mshr && !write_mshr);
1415 if (prefetcher && !mshrQueue.isFull()) {
1416 // If we have a miss queue slot, we can try a prefetch
1417 PacketPtr pkt = prefetcher->getPacket();
1418 if (pkt) {
1419 Addr pf_addr = blockAlign(pkt->getAddr());
1420 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1421 !writeBuffer.findMatch(pf_addr)) {
1422 // Update statistic on number of prefetches issued
1423 // (hwpf_mshr_misses)
1424 assert(pkt->req->masterId() < system->maxMasters());
1425 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1426 // Don't request bus, since we already have it
1427 return allocateMissBuffer(pkt, curTick(), false);
1428 } else {
1429 // free the request and packet
1430 delete pkt->req;
1431 delete pkt;
1432 }
1433 }
1434 }
1435
1436 return NULL;
1437 }
1438
1439
1440 template<class TagStore>
1441 PacketPtr
1442 Cache<TagStore>::getTimingPacket()
1443 {
1444 MSHR *mshr = getNextMSHR();
1445
1446 if (mshr == NULL) {
1447 return NULL;
1448 }
1449
1450 // use request from 1st target
1451 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1452 PacketPtr pkt = NULL;
1453
1454 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1455 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1456 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1457 // in MSHR, so now that we are getting around to processing
1458 // it, just treat it as if we got a failure response
1459 pkt = new Packet(tgt_pkt);
1460 pkt->cmd = MemCmd::UpgradeFailResp;
1461 pkt->senderState = mshr;
1462 pkt->firstWordTime = pkt->finishTime = curTick();
1463 handleResponse(pkt);
1464 return NULL;
1465 } else if (mshr->isForwardNoResponse()) {
1466 // no response expected, just forward packet as it is
1467 assert(tags->findBlock(mshr->addr) == NULL);
1468 pkt = tgt_pkt;
1469 } else {
1470 BlkType *blk = tags->findBlock(mshr->addr);
1471
1472 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1473 // It might be possible for a writeback to arrive between
1474 // the time the prefetch is placed in the MSHRs and when
1475 // it's selected to send... if so, this assert will catch
1476 // that, and then we'll have to figure out what to do.
1477 assert(blk == NULL);
1478
1479 // We need to check the caches above us to verify that they don't have
1480 // a copy of this block in the dirty state at the moment. Without this
1481 // check we could get a stale copy from memory that might get used
1482 // in place of the dirty one.
1483 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1484 snoop_pkt->setExpressSnoop();
1485 snoop_pkt->senderState = mshr;
1486 cpuSidePort->sendTiming(snoop_pkt);
1487
1488 if (snoop_pkt->memInhibitAsserted()) {
1489 markInService(mshr, snoop_pkt);
1490 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1491 tgt_pkt->getAddr());
1492 delete snoop_pkt;
1493 return NULL;
1494 }
1495 delete snoop_pkt;
1496 }
1497
1498 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1499
1500 mshr->isForward = (pkt == NULL);
1501
1502 if (mshr->isForward) {
1503 // not a cache block request, but a response is expected
1504 // make copy of current packet to forward, keep current
1505 // copy for response handling
1506 pkt = new Packet(tgt_pkt);
1507 pkt->allocate();
1508 if (pkt->isWrite()) {
1509 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1510 }
1511 }
1512 }
1513
1514 assert(pkt != NULL);
1515 pkt->senderState = mshr;
1516 return pkt;
1517 }
1518
1519
1520 template<class TagStore>
1521 Tick
1522 Cache<TagStore>::nextMSHRReadyTime()
1523 {
1524 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1525 writeBuffer.nextMSHRReadyTime());
1526
1527 if (prefetcher) {
1528 nextReady = std::min(nextReady,
1529 prefetcher->nextPrefetchReadyTime());
1530 }
1531
1532 return nextReady;
1533 }
1534
1535
1536 ///////////////
1537 //
1538 // CpuSidePort
1539 //
1540 ///////////////
1541
1542 template<class TagStore>
1543 AddrRangeList
1544 Cache<TagStore>::CpuSidePort::getAddrRanges()
1545 {
1546 return cache->getAddrRanges();
1547 }
1548
1549 template<class TagStore>
1550 bool
1551 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1552 {
1553 // illegal to block responses... can lead to deadlock
1554 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1555         DPRINTF(Cache, "Scheduling a retry while blocked\n");
1556 mustSendRetry = true;
1557 return false;
1558 }
1559
1560 cache->timingAccess(pkt);
1561 return true;
1562 }
1563
1564 template<class TagStore>
1565 Tick
1566 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1567 {
1568 assert(pkt->isRequest());
1569 // atomic request
1570 return cache->atomicAccess(pkt);
1571 }
1572
1573 template<class TagStore>
1574 void
1575 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1576 {
1577 assert(pkt->isRequest());
1578 // functional request
1579 cache->functionalAccess(pkt, true);
1580 }
1581
1582 template<class TagStore>
1583 Cache<TagStore>::
1584 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1585 const std::string &_label)
1586 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
1587 {
1588 }
1589
1590 ///////////////
1591 //
1592 // MemSidePort
1593 //
1594 ///////////////
1595
1596 template<class TagStore>
1597 bool
1598 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1599 {
1600     // this needs to be fixed so that the cache updates the mshr and sends the
1601     // packet back out on the link, but that case probably won't happen, so
1602     // until this gets fixed, just panic when it does
1603 if (pkt->wasNacked())
1604 panic("Need to implement cache resending nacked packets!\n");
1605
1606 if (pkt->isResponse()) {
1607 cache->handleResponse(pkt);
1608 } else {
1609 cache->snoopTiming(pkt);
1610 }
1611 return true;
1612 }
1613
1614 template<class TagStore>
1615 Tick
1616 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1617 {
1618 assert(pkt->isRequest());
1619 // atomic snoop
1620 return cache->snoopAtomic(pkt);
1621 }
1622
1623 template<class TagStore>
1624 void
1625 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1626 {
1627 assert(pkt->isRequest());
1628 // functional snoop (note that in contrast to atomic we don't have
1629 // a specific functionalSnoop method, as they have the same
1630 // behaviour regardless)
1631 cache->functionalAccess(pkt, false);
1632 }
1633
1634 template<class TagStore>
1635 void
1636 Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
1637 {
1638 // if we have a response packet waiting we have to start with that
1639 if (deferredPacketReady()) {
1640 // use the normal approach from the timing port
1641 trySendTiming();
1642 } else {
1643 // check for request packets (requests & writebacks)
1644 PacketPtr pkt = cache.getTimingPacket();
1645 if (pkt == NULL) {
1646 // can happen if e.g. we attempt a writeback and fail, but
1647 // before the retry, the writeback is eliminated because
1648 // we snoop another cache's ReadEx.
1649 waitingOnRetry = false;
1650 } else {
1651 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1652
1653 waitingOnRetry = !port.sendTiming(pkt);
1654
1655 if (waitingOnRetry) {
1656 DPRINTF(CachePort, "now waiting on a retry\n");
1657 if (!mshr->isForwardNoResponse()) {
1658 // we are awaiting a retry, but we
1659 // delete the packet and will be creating a new packet
1660 // when we get the opportunity
1661 delete pkt;
1662 }
1663 // note that we have now masked any requestBus and
1664 // schedSendEvent (we will wait for a retry before
1665 // doing anything), and this is so even if we do not
1666 // care about this packet and might override it before
1667 // it gets retried
1668 } else {
1669 cache.markInService(mshr, pkt);
1670 }
1671 }
1672 }
1673
1674 // if we succeeded and are not waiting for a retry, schedule the
1675 // next send, not only looking at the response transmit list, but
1676 // also considering when the next MSHR is ready
1677 if (!waitingOnRetry) {
1678 scheduleSend(cache.nextMSHRReadyTime());
1679 }
1680 }
1681
1682 template<class TagStore>
1683 Cache<TagStore>::
1684 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1685 const std::string &_label)
1686 : BaseCache::CacheMasterPort(_name, _cache, _queue),
1687 _queue(*_cache, *this, _label), cache(_cache)
1688 {
1689 }