[gem5.git] src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "debug/CachePort.hh"
59 #include "mem/cache/prefetch/base.hh"
60 #include "mem/cache/blk.hh"
61 #include "mem/cache/cache.hh"
62 #include "mem/cache/mshr.hh"
63 #include "sim/sim_exit.hh"
64
65 template<class TagStore>
66 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
67 : BaseCache(p),
68 tags(tags),
69 prefetcher(pf),
70 doFastWrites(true),
71 prefetchOnAccess(p->prefetch_on_access)
72 {
73 tempBlock = new BlkType();
74 tempBlock->data = new uint8_t[blkSize];
75
76 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
77 "CpuSidePort");
78 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
79 "MemSidePort");
80 cpuSidePort->setOtherPort(memSidePort);
81 memSidePort->setOtherPort(cpuSidePort);
82
83 tags->setCache(this);
84 if (prefetcher)
85 prefetcher->setCache(this);
86 }
87
88 template<class TagStore>
89 void
90 Cache<TagStore>::regStats()
91 {
92 BaseCache::regStats();
93 tags->regStats(name());
94 if (prefetcher)
95 prefetcher->regStats(name());
96 }
97
98 template<class TagStore>
99 Port *
100 Cache<TagStore>::getPort(const std::string &if_name, int idx)
101 {
102 if (if_name == "" || if_name == "cpu_side") {
103 return cpuSidePort;
104 } else if (if_name == "mem_side") {
105 return memSidePort;
106 } else {
107 panic("Port name %s unrecognized\n", if_name);
108 }
109 }
110
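// Perform the read-modify-write for a SwapReq: return the block's current
// contents in the packet and, unless a conditional swap's comparison fails,
// overwrite the block with the packet's write data and mark it dirty.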
111 template<class TagStore>
112 void
113 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
114 {
115 uint64_t overwrite_val;
116 bool overwrite_mem;
117 uint64_t condition_val64;
118 uint32_t condition_val32;
119
120 int offset = tags->extractBlkOffset(pkt->getAddr());
121 uint8_t *blk_data = blk->data + offset;
122
123 assert(sizeof(uint64_t) >= pkt->getSize());
124
125 overwrite_mem = true;
126 // keep a copy of our possible write value, and copy what is at the
127 // memory address into the packet
128 pkt->writeData((uint8_t *)&overwrite_val);
129 pkt->setData(blk_data);
130
131 if (pkt->req->isCondSwap()) {
132 if (pkt->getSize() == sizeof(uint64_t)) {
133 condition_val64 = pkt->req->getExtraData();
134 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
135 sizeof(uint64_t));
136 } else if (pkt->getSize() == sizeof(uint32_t)) {
137 condition_val32 = (uint32_t)pkt->req->getExtraData();
138 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
139 sizeof(uint32_t));
140 } else
141 panic("Invalid size for conditional read/write\n");
142 }
143
144 if (overwrite_mem) {
145 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
146 blk->status |= BlkDirty;
147 }
148 }
149
150
151 template<class TagStore>
152 void
153 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
154 bool deferred_response,
155 bool pending_downgrade)
156 {
157 assert(blk && blk->isValid());
158 // Occasionally this is not true... if we are a lower-level cache
159 // satisfying a string of Read and ReadEx requests from
160 // upper-level caches, a Read will mark the block as shared but we
161 // can satisfy a following ReadEx anyway since we can rely on the
162 // Read requester(s) to have buffered the ReadEx snoop and to
163 // invalidate their blocks after receiving them.
164 // assert(!pkt->needsExclusive() || blk->isWritable());
165 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
166
167 // Check RMW operations first since both isRead() and
168 // isWrite() will be true for them
169 if (pkt->cmd == MemCmd::SwapReq) {
170 cmpAndSwap(blk, pkt);
171 } else if (pkt->isWrite()) {
172 if (blk->checkWrite(pkt)) {
173 pkt->writeDataToBlock(blk->data, blkSize);
174 blk->status |= BlkDirty;
175 }
176 } else if (pkt->isRead()) {
177 if (pkt->isLLSC()) {
178 blk->trackLoadLocked(pkt);
179 }
180 pkt->setDataFromBlock(blk->data, blkSize);
181 if (pkt->getSize() == blkSize) {
182 // special handling for coherent block requests from
183 // upper-level caches
184 if (pkt->needsExclusive()) {
185 // if we have a dirty copy, make sure the recipient
186 // keeps it marked dirty
187 if (blk->isDirty()) {
188 pkt->assertMemInhibit();
189 }
190 // on ReadExReq we give up our copy unconditionally
191 tags->invalidateBlk(blk);
192 } else if (blk->isWritable() && !pending_downgrade
193 && !pkt->sharedAsserted()) {
194 // we can give the requester an exclusive copy (by not
195 // asserting shared line) on a read request if:
196 // - we have an exclusive copy at this level (& below)
197 // - we don't have a pending snoop from below
198 // signaling another read request
199 // - no other cache above has a copy (otherwise it
200 // would have asserted shared line on request)
201
202 if (blk->isDirty()) {
203 // special considerations if we're owner:
204 if (!deferred_response && !isTopLevel) {
205 // if we are responding immediately and can
206 // signal that we're transferring ownership
207 // along with exclusivity, do so
208 pkt->assertMemInhibit();
209 blk->status &= ~BlkDirty;
210 } else {
211 // if we're responding after our own miss,
212 // there's a window where the recipient didn't
213 // know it was getting ownership and may not
214 // have responded to snoops correctly, so we
215 // can't pass off ownership *or* exclusivity
216 pkt->assertShared();
217 }
218 }
219 } else {
220 // otherwise only respond with a shared copy
221 pkt->assertShared();
222 }
223 }
224 } else {
225 // Not a read or write... must be an upgrade. It's OK
226 // to just ack those as long as we have an exclusive
227 // copy at this level.
228 assert(pkt->isUpgrade());
229 tags->invalidateBlk(blk);
230 }
231 }
232
233
234 /////////////////////////////////////////////////////
235 //
236 // MSHR helper functions
237 //
238 /////////////////////////////////////////////////////
239
240
241 template<class TagStore>
242 void
243 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
244 {
245 markInServiceInternal(mshr, pkt);
246 #if 0
247 if (mshr->originalCmd == MemCmd::HardPFReq) {
248 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
249 name());
250 //Also clear pending if need be
251 if (!prefetcher->havePending())
252 {
253 deassertMemSideBusRequest(Request_PF);
254 }
255 }
256 #endif
257 }
258
259
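// Squash all outstanding MSHR targets belonging to threadNum, and clear the
// blocked state if the squashed entries were the cause of the blockage.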
260 template<class TagStore>
261 void
262 Cache<TagStore>::squash(int threadNum)
263 {
264 bool unblock = false;
265 BlockedCause cause = NUM_BLOCKED_CAUSES;
266
267 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
268 noTargetMSHR = NULL;
269 unblock = true;
270 cause = Blocked_NoTargets;
271 }
272 if (mshrQueue.isFull()) {
273 unblock = true;
274 cause = Blocked_NoMSHRs;
275 }
276 mshrQueue.squash(threadNum);
277 if (unblock && !mshrQueue.isFull()) {
278 clearBlocked(cause);
279 }
280 }
281
282 /////////////////////////////////////////////////////
283 //
284 // Access path: requests coming in from the CPU side
285 //
286 /////////////////////////////////////////////////////
287
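// Check the tags for pkt and try to satisfy it locally, handling uncacheable
// accesses and the special Writeback case. Sets blk and lat as side effects
// and returns true if the access was satisfied at this level.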
288 template<class TagStore>
289 bool
290 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
291 int &lat, PacketList &writebacks)
292 {
293 if (pkt->req->isUncacheable()) {
294 if (pkt->req->isClearLL()) {
295 tags->clearLocks();
296 } else {
297 blk = tags->findBlock(pkt->getAddr());
298 if (blk != NULL) {
299 tags->invalidateBlk(blk);
300 }
301 }
302
303 blk = NULL;
304 lat = hitLatency;
305 return false;
306 }
307
308 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
309 blk = tags->accessBlock(pkt->getAddr(), lat, id);
310
311 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
312 pkt->req->isInstFetch() ? " (ifetch)" : "",
313 pkt->getAddr(), (blk) ? "hit" : "miss");
314
315 if (blk != NULL) {
316
317 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
318 // OK to satisfy access
319 incHitCount(pkt, id);
320 satisfyCpuSideRequest(pkt, blk);
321 return true;
322 }
323 }
324
325 // Can't satisfy access normally... either no block (blk == NULL)
326 // or have block but need exclusive & only have shared.
327
328 // Writeback handling is special case. We can write the block
329 // into the cache without having a writeable copy (or any copy at
330 // all).
331 if (pkt->cmd == MemCmd::Writeback) {
332 assert(blkSize == pkt->getSize());
333 if (blk == NULL) {
334 // need to do a replacement
335 blk = allocateBlock(pkt->getAddr(), writebacks);
336 if (blk == NULL) {
337 // no replaceable block available, give up.
338 // writeback will be forwarded to next level.
339 incMissCount(pkt, id);
340 return false;
341 }
342 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
343 tags->insertBlock(pkt->getAddr(), blk, id);
344 blk->status = BlkValid | BlkReadable;
345 }
346 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
347 blk->status |= BlkDirty;
348 if (pkt->isSupplyExclusive()) {
349 blk->status |= BlkWritable;
350 }
351 // nothing else to do; writeback doesn't expect response
352 assert(!pkt->needsResponse());
353 incHitCount(pkt, id);
354 return true;
355 }
356
357 incMissCount(pkt, id);
358
359 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
360 // complete miss on store conditional... just give up now
361 pkt->req->setExtraData(0);
362 return true;
363 }
364
365 return false;
366 }
367
368
369 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
370 {
371 Packet::SenderState *prevSenderState;
372 int prevSrc;
373 #ifndef NDEBUG
374 BaseCache *cache;
375 #endif
376 public:
377 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
378 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
379 #ifndef NDEBUG
380 , cache(_cache)
381 #endif
382 {}
383 void restore(Packet *pkt, BaseCache *_cache)
384 {
385 assert(_cache == cache);
386 pkt->senderState = prevSenderState;
387 pkt->setDest(prevSrc);
388 }
389 };
390
391
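// Handle a timing-mode access from the CPU side: forwarded responses from
// upper-level caches, mem-inhibited requests, uncacheable accesses, and
// normal hits and misses (allocating MSHR/write-buffer entries as needed).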
392 template<class TagStore>
393 bool
394 Cache<TagStore>::timingAccess(PacketPtr pkt)
395 {
396 //@todo Add back in MemDebug Calls
397 // MemDebug::cacheAccess(pkt);
398
399 // we charge hitLatency for doing just about anything here
400 Tick time = curTick() + hitLatency;
401
402 if (pkt->isResponse()) {
403 // must be cache-to-cache response from upper to lower level
404 ForwardResponseRecord *rec =
405 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
406
407 if (rec == NULL) {
408 assert(pkt->cmd == MemCmd::HardPFResp);
409 // Check if it's a prefetch response and handle it. We shouldn't
410 // get any other kinds of responses without FRRs.
411 DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
412 pkt->getAddr());
413 handleResponse(pkt);
414 return true;
415 }
416
417 rec->restore(pkt, this);
418 delete rec;
419 memSidePort->respond(pkt, time);
420 return true;
421 }
422
423 assert(pkt->isRequest());
424
425 if (pkt->memInhibitAsserted()) {
426 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
427 pkt->getAddr());
428 assert(!pkt->req->isUncacheable());
429 // Special tweak for multilevel coherence: snoop downward here
430 // on invalidates since there may be other caches below here
431 // that have shared copies. Not necessary if we know that
432 // supplier had exclusive copy to begin with.
433 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
434 Packet *snoopPkt = new Packet(pkt, true); // clear flags
435 snoopPkt->setExpressSnoop();
436 snoopPkt->assertMemInhibit();
437 memSidePort->sendTiming(snoopPkt);
438 // main memory will delete snoopPkt
439 }
440 // since we're the official target but we aren't responding,
441 // delete the packet now.
442 delete pkt;
443 return true;
444 }
445
446 if (pkt->req->isUncacheable()) {
447 if (pkt->req->isClearLL()) {
448 tags->clearLocks();
449 } else {
450 BlkType *blk = tags->findBlock(pkt->getAddr());
451 if (blk != NULL) {
452 tags->invalidateBlk(blk);
453 }
454 }
455
456 // writes go in write buffer, reads use MSHR
457 if (pkt->isWrite() && !pkt->isRead()) {
458 allocateWriteBuffer(pkt, time, true);
459 } else {
460 allocateUncachedReadBuffer(pkt, time, true);
461 }
462 assert(pkt->needsResponse()); // else we should delete it here??
463 return true;
464 }
465
466 int lat = hitLatency;
467 BlkType *blk = NULL;
468 PacketList writebacks;
469
470 bool satisfied = access(pkt, blk, lat, writebacks);
471
472 #if 0
473 /** @todo make the fast write alloc (wh64) work with coherence. */
474
475 // If this is a block size write/hint (WH64) allocate the block here
476 // if the coherence protocol allows it.
477 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
478 (pkt->cmd == MemCmd::WriteReq
479 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
480 // no outstanding misses, can do this
481 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
482 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
483 if (outstanding_miss) {
484 warn("WriteInv doing a fast allocate "
485 "with an outstanding miss to the same address\n");
486 }
487 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
488 writebacks);
489 ++fastWrites;
490 }
491 }
492 #endif
493
494 // track time of availability of next prefetch, if any
495 Tick next_pf_time = 0;
496
497 bool needsResponse = pkt->needsResponse();
498
499 if (satisfied) {
500 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
501 if (blk)
502 blk->status &= ~BlkHWPrefetched;
503 next_pf_time = prefetcher->notify(pkt, time);
504 }
505
506 if (needsResponse) {
507 pkt->makeTimingResponse();
508 cpuSidePort->respond(pkt, curTick()+lat);
509 } else {
510 delete pkt;
511 }
512 } else {
513 // miss
514
515 Addr blk_addr = blockAlign(pkt->getAddr());
516 MSHR *mshr = mshrQueue.findMatch(blk_addr);
517
518 if (mshr) {
519 // MSHR hit
520 //@todo remove hw_pf here
521 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
522 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
523 mshr->threadNum = -1;
524 }
525 mshr->allocateTarget(pkt, time, order++);
526 if (mshr->getNumTargets() == numTarget) {
527 noTargetMSHR = mshr;
528 setBlocked(Blocked_NoTargets);
529 // need to be careful with this... if this mshr isn't
530 // ready yet (i.e. time > curTick()), we don't want to
531 // move it ahead of mshrs that are ready
532 // mshrQueue.moveToFront(mshr);
533 }
534 } else {
535 // no MSHR
536 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
537 // always mark as cache fill for now... if we implement
538 // no-write-allocate or bypass accesses this will have to
539 // be changed.
540 if (pkt->cmd == MemCmd::Writeback) {
541 allocateWriteBuffer(pkt, time, true);
542 } else {
543 if (blk && blk->isValid()) {
544 // If we have a write miss to a valid block, we
545 // need to mark the block non-readable. Otherwise
546 // if we allow reads while there's an outstanding
547 // write miss, the read could return stale data
548 // out of the cache block... a more aggressive
549 // system could detect the overlap (if any) and
550 // forward data out of the MSHRs, but we don't do
551 // that yet. Note that we do need to leave the
552 // block valid so that it stays in the cache, in
553 // case we get an upgrade response (and hence no
554 // new data) when the write miss completes.
555 // As long as CPUs do proper store/load forwarding
556 // internally, and have a sufficiently weak memory
557 // model, this is probably unnecessary, but at some
558 // point it must have seemed like we needed it...
559 assert(pkt->needsExclusive() && !blk->isWritable());
560 blk->status &= ~BlkReadable;
561 }
562
563 allocateMissBuffer(pkt, time, true);
564 }
565
566 if (prefetcher) {
567 next_pf_time = prefetcher->notify(pkt, time);
568 }
569 }
570 }
571
572 if (next_pf_time != 0)
573 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
574
575 // copy writebacks to write buffer
576 while (!writebacks.empty()) {
577 PacketPtr wbPkt = writebacks.front();
578 allocateWriteBuffer(wbPkt, time, true);
579 writebacks.pop_front();
580 }
581
582 return true;
583 }
584
585
586 // See comment in cache.hh.
587 template<class TagStore>
588 PacketPtr
589 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
590 bool needsExclusive)
591 {
592 bool blkValid = blk && blk->isValid();
593
594 if (cpu_pkt->req->isUncacheable()) {
595 //assert(blk == NULL);
596 return NULL;
597 }
598
599 if (!blkValid &&
600 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
601 // Writebacks that weren't allocated in access() and upgrades
602 // from upper-level caches that missed completely just go
603 // through.
604 return NULL;
605 }
606
607 assert(cpu_pkt->needsResponse());
608
609 MemCmd cmd;
610 // @TODO make useUpgrades a parameter.
611 // Note that ownership protocols require upgrade, otherwise a
612 // write miss on a shared owned block will generate a ReadExcl,
613 // which will clobber the owned copy.
614 const bool useUpgrades = true;
615 if (blkValid && useUpgrades) {
616 // only reason to be here is that blk is shared
617 // (read-only) and we need exclusive
618 assert(needsExclusive && !blk->isWritable());
619 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
620 } else {
621 // block is invalid
622 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
623 }
624 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
625
626 pkt->allocate();
627 return pkt;
628 }
629
630
631 template<class TagStore>
632 Tick
633 Cache<TagStore>::atomicAccess(PacketPtr pkt)
634 {
635 int lat = hitLatency;
636
637 // @TODO: make this a parameter
638 bool last_level_cache = false;
639
640 if (pkt->memInhibitAsserted()) {
641 assert(!pkt->req->isUncacheable());
642 // have to invalidate ourselves and any lower caches even if
643 // upper cache will be responding
644 if (pkt->isInvalidate()) {
645 BlkType *blk = tags->findBlock(pkt->getAddr());
646 if (blk && blk->isValid()) {
647 tags->invalidateBlk(blk);
648 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
649 pkt->cmdString(), pkt->getAddr());
650 }
651 if (!last_level_cache) {
652 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
653 pkt->cmdString(), pkt->getAddr());
654 lat += memSidePort->sendAtomic(pkt);
655 }
656 } else {
657 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
658 pkt->cmdString(), pkt->getAddr());
659 }
660
661 return lat;
662 }
663
664 // should assert here that there are no outstanding MSHRs or
665 // writebacks... that would mean that someone used an atomic
666 // access in timing mode
667
668 BlkType *blk = NULL;
669 PacketList writebacks;
670
671 if (!access(pkt, blk, lat, writebacks)) {
672 // MISS
673 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
674
675 bool is_forward = (bus_pkt == NULL);
676
677 if (is_forward) {
678 // just forwarding the same request to the next level
679 // no local cache operation involved
680 bus_pkt = pkt;
681 }
682
683 DPRINTF(Cache, "Sending an atomic %s for %x\n",
684 bus_pkt->cmdString(), bus_pkt->getAddr());
685
686 #if TRACING_ON
687 CacheBlk::State old_state = blk ? blk->status : 0;
688 #endif
689
690 lat += memSidePort->sendAtomic(bus_pkt);
691
692 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
693 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
694
695 assert(!bus_pkt->wasNacked());
696
697 // If packet was a forward, the response (if any) is already
698 // in place in the bus_pkt == pkt structure, so we don't need
699 // to do anything. Otherwise, use the separate bus_pkt to
700 // generate response to pkt and then delete it.
701 if (!is_forward) {
702 if (pkt->needsResponse()) {
703 assert(bus_pkt->isResponse());
704 if (bus_pkt->isError()) {
705 pkt->makeAtomicResponse();
706 pkt->copyError(bus_pkt);
707 } else if (bus_pkt->isRead() ||
708 bus_pkt->cmd == MemCmd::UpgradeResp) {
709 // we're updating cache state to allow us to
710 // satisfy the upstream request from the cache
711 blk = handleFill(bus_pkt, blk, writebacks);
712 satisfyCpuSideRequest(pkt, blk);
713 } else {
714 // we're satisfying the upstream request without
715 // modifying cache state, e.g., a write-through
716 pkt->makeAtomicResponse();
717 }
718 }
719 delete bus_pkt;
720 }
721 }
722
723 // Note that we don't invoke the prefetcher at all in atomic mode.
724 // It's not clear how to do it properly, particularly for
725 // prefetchers that aggressively generate prefetch candidates and
726 // rely on bandwidth contention to throttle them; these will tend
727 // to pollute the cache in atomic mode since there is no bandwidth
728 // contention. If we ever do want to enable prefetching in atomic
729 // mode, though, this is the place to do it... see timingAccess()
730 // for an example (though we'd want to issue the prefetch(es)
731 // immediately rather than calling requestMemSideBus() as we do
732 // there).
733
734 // Handle writebacks if needed
735 while (!writebacks.empty()){
736 PacketPtr wbPkt = writebacks.front();
737 memSidePort->sendAtomic(wbPkt);
738 writebacks.pop_front();
739 delete wbPkt;
740 }
741
742 // We now have the block one way or another (hit or completed miss)
743
744 if (pkt->needsResponse()) {
745 pkt->makeAtomicResponse();
746 }
747
748 return lat;
749 }
750
751
752 template<class TagStore>
753 void
754 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
755 {
756 Addr blk_addr = blockAlign(pkt->getAddr());
757 BlkType *blk = tags->findBlock(pkt->getAddr());
758 MSHR *mshr = mshrQueue.findMatch(blk_addr);
759
760 pkt->pushLabel(name());
761
762 CacheBlkPrintWrapper cbpw(blk);
763
764 // Note that just because an L2/L3 has valid data doesn't mean an
765 // L1 doesn't have a more up-to-date modified copy that still
766 // needs to be found. As a result we always update the request if
767 // we have it, but only declare it satisfied if we are the owner.
768
769 // see if we have data at all (owned or otherwise)
770 bool have_data = blk && blk->isValid()
771 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
772
773 // data we have is dirty if marked as such or if valid & ownership
774 // pending due to outstanding UpgradeReq
775 bool have_dirty =
776 have_data && (blk->isDirty() ||
777 (mshr && mshr->inService && mshr->isPendingDirty()));
778
779 bool done = have_dirty
780 || cpuSidePort->checkFunctional(pkt)
781 || mshrQueue.checkFunctional(pkt, blk_addr)
782 || writeBuffer.checkFunctional(pkt, blk_addr)
783 || memSidePort->checkFunctional(pkt);
784
785 DPRINTF(Cache, "functional %s %x %s%s%s\n",
786 pkt->cmdString(), pkt->getAddr(),
787 (blk && blk->isValid()) ? "valid " : "",
788 have_data ? "data " : "", done ? "done " : "");
789
790 // We're leaving the cache, so pop cache->name() label
791 pkt->popLabel();
792
793 if (done) {
794 pkt->makeResponse();
795 } else {
796 // if it came as a request from the CPU side then make sure it
797 // continues towards the memory side
798 if (fromCpuSide) {
799 memSidePort->sendFunctional(pkt);
800 } else if (forwardSnoops) {
801 // if it came from the memory side, it must be a snoop request
802 // and we should only forward it if we are forwarding snoops
803 cpuSidePort->sendFunctional(pkt);
804 }
805 }
806 }
807
808
809 /////////////////////////////////////////////////////
810 //
811 // Response handling: responses from the memory side
812 //
813 /////////////////////////////////////////////////////
814
815
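// Process a response arriving from the memory side: fill the block if this
// is a cache fill, service all of the MSHR's targets, and then retire the
// MSHR or re-request the bus for its deferred targets.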
816 template<class TagStore>
817 void
818 Cache<TagStore>::handleResponse(PacketPtr pkt)
819 {
820 Tick time = curTick() + hitLatency;
821 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
822 bool is_error = pkt->isError();
823
824 assert(mshr);
825
826 if (pkt->wasNacked()) {
827 //pkt->reinitFromRequest();
828 warn("NACKs from devices not connected to the same bus "
829 "not implemented\n");
830 return;
831 }
832 if (is_error) {
833 DPRINTF(Cache, "Cache received packet with error for address %x, "
834 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
835 }
836
837 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
838
839 MSHRQueue *mq = mshr->queue;
840 bool wasFull = mq->isFull();
841
842 if (mshr == noTargetMSHR) {
843 // we always clear at least one target
844 clearBlocked(Blocked_NoTargets);
845 noTargetMSHR = NULL;
846 }
847
848 // Initial target is used just for stats
849 MSHR::Target *initial_tgt = mshr->getTarget();
850 BlkType *blk = tags->findBlock(pkt->getAddr());
851 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
852 Tick miss_latency = curTick() - initial_tgt->recvTime;
853 PacketList writebacks;
854
855 if (pkt->req->isUncacheable()) {
856 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
857 miss_latency;
858 } else {
859 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
860 miss_latency;
861 }
862
863 bool is_fill = !mshr->isForward &&
864 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
865
866 if (is_fill && !is_error) {
867 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
868 pkt->getAddr());
869
870 // give mshr a chance to do some dirty work
871 mshr->handleFill(pkt, blk);
872
873 blk = handleFill(pkt, blk, writebacks);
874 assert(blk != NULL);
875 }
876
877 // First offset for critical word first calculations
878 int initial_offset = 0;
879
880 if (mshr->hasTargets()) {
881 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
882 }
883
884 while (mshr->hasTargets()) {
885 MSHR::Target *target = mshr->getTarget();
886
887 switch (target->source) {
888 case MSHR::Target::FromCPU:
889 Tick completion_time;
890 if (is_fill) {
891 satisfyCpuSideRequest(target->pkt, blk,
892 true, mshr->hasPostDowngrade());
893 // How many bytes past the first request is this one
894 int transfer_offset =
895 target->pkt->getOffset(blkSize) - initial_offset;
896 if (transfer_offset < 0) {
897 transfer_offset += blkSize;
898 }
899
900 // If critical word (no offset) return first word time
901 completion_time = tags->getHitLatency() +
902 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
903
904 assert(!target->pkt->req->isUncacheable());
905 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
906 completion_time - target->recvTime;
907 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
908 // failed StoreCond upgrade
909 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
910 target->pkt->cmd == MemCmd::StoreCondFailReq ||
911 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
912 completion_time = tags->getHitLatency() + pkt->finishTime;
913 target->pkt->req->setExtraData(0);
914 } else {
915 // not a cache fill, just forwarding response
916 completion_time = tags->getHitLatency() + pkt->finishTime;
917 if (pkt->isRead() && !is_error) {
918 target->pkt->setData(pkt->getPtr<uint8_t>());
919 }
920 }
921 target->pkt->makeTimingResponse();
922 // if this packet is an error copy that to the new packet
923 if (is_error)
924 target->pkt->copyError(pkt);
925 if (target->pkt->cmd == MemCmd::ReadResp &&
926 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
927 // If intermediate cache got ReadRespWithInvalidate,
928 // propagate that. Response should not have
929 // isInvalidate() set otherwise.
930 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
931 }
932 cpuSidePort->respond(target->pkt, completion_time);
933 break;
934
935 case MSHR::Target::FromPrefetcher:
936 assert(target->pkt->cmd == MemCmd::HardPFReq);
937 if (blk)
938 blk->status |= BlkHWPrefetched;
939 delete target->pkt->req;
940 delete target->pkt;
941 break;
942
943 case MSHR::Target::FromSnoop:
944 // I don't believe that a snoop can be in an error state
945 assert(!is_error);
946 // response to snoop request
947 DPRINTF(Cache, "processing deferred snoop...\n");
948 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
949 handleSnoop(target->pkt, blk, true, true,
950 mshr->hasPostInvalidate());
951 break;
952
953 default:
954 panic("Illegal target->source enum %d\n", target->source);
955 }
956
957 mshr->popTarget();
958 }
959
960 if (blk) {
961 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
962 tags->invalidateBlk(blk);
963 } else if (mshr->hasPostDowngrade()) {
964 blk->status &= ~BlkWritable;
965 }
966 }
967
968 if (mshr->promoteDeferredTargets()) {
969 // avoid later read getting stale data while write miss is
970 // outstanding.. see comment in timingAccess()
971 if (blk) {
972 blk->status &= ~BlkReadable;
973 }
974 MSHRQueue *mq = mshr->queue;
975 mq->markPending(mshr);
976 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
977 } else {
978 mq->deallocate(mshr);
979 if (wasFull && !mq->isFull()) {
980 clearBlocked((BlockedCause)mq->index);
981 }
982 }
983
984 // copy writebacks to write buffer
985 while (!writebacks.empty()) {
986 PacketPtr wbPkt = writebacks.front();
987 allocateWriteBuffer(wbPkt, time, true);
988 writebacks.pop_front();
989 }
990 // if we used temp block, clear it out
991 if (blk == tempBlock) {
992 if (blk->isDirty()) {
993 allocateWriteBuffer(writebackBlk(blk), time, true);
994 }
995 tags->invalidateBlk(blk);
996 }
997
998 delete pkt;
999 }
1000
1001
1002
1003
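// Create a Writeback packet for a valid, dirty block: copy out the block's
// data, clear its dirty bit, and mark the writeback as supplying an
// exclusive copy if the block is writable.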
1004 template<class TagStore>
1005 PacketPtr
1006 Cache<TagStore>::writebackBlk(BlkType *blk)
1007 {
1008 assert(blk && blk->isValid() && blk->isDirty());
1009
1010 writebacks[0/*pkt->req->threadId()*/]++;
1011
1012 Request *writebackReq =
1013 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
1014 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
1015 if (blk->isWritable()) {
1016 writeback->setSupplyExclusive();
1017 }
1018 writeback->allocate();
1019 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1020
1021 blk->status &= ~BlkDirty;
1022 return writeback;
1023 }
1024
1025
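// Choose a victim block for addr. If the victim has an outstanding MSHR
// (e.g. a pending upgrade) the allocation fails and NULL is returned;
// otherwise a dirty victim is queued for writeback via the writebacks list.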
1026 template<class TagStore>
1027 typename Cache<TagStore>::BlkType*
1028 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1029 {
1030 BlkType *blk = tags->findVictim(addr, writebacks);
1031
1032 if (blk->isValid()) {
1033 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1034 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1035 if (repl_mshr) {
1036 // must be an outstanding upgrade request on block
1037 // we're about to replace...
1038 assert(!blk->isWritable());
1039 assert(repl_mshr->needsExclusive());
1040 // too hard to replace block with transient state
1041 // allocation failed, block not inserted
1042 return NULL;
1043 } else {
1044 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1045 repl_addr, addr,
1046 blk->isDirty() ? "writeback" : "clean");
1047
1048 if (blk->isDirty()) {
1049 // Save writeback packet for handling by caller
1050 writebacks.push_back(writebackBlk(blk));
1051 }
1052 }
1053 }
1054
1055 return blk;
1056 }
1057
1058
1059 // Note that the reason we return a list of writebacks rather than
1060 // inserting them directly in the write buffer is that this function
1061 // is called by both atomic and timing-mode accesses, and in atomic
1062 // mode we don't mess with the write buffer (we just perform the
1063 // writebacks atomically once the original request is complete).
1064 template<class TagStore>
1065 typename Cache<TagStore>::BlkType*
1066 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1067 PacketList &writebacks)
1068 {
1069 Addr addr = pkt->getAddr();
1070 #if TRACING_ON
1071 CacheBlk::State old_state = blk ? blk->status : 0;
1072 #endif
1073
1074 if (blk == NULL) {
1075 // better have read new data...
1076 assert(pkt->hasData());
1077 // need to do a replacement
1078 blk = allocateBlock(addr, writebacks);
1079 if (blk == NULL) {
1080 // No replaceable block... just use temporary storage to
1081 // complete the current request and then get rid of it
1082 assert(!tempBlock->isValid());
1083 blk = tempBlock;
1084 tempBlock->set = tags->extractSet(addr);
1085 tempBlock->tag = tags->extractTag(addr);
1086 DPRINTF(Cache, "using temp block for %x\n", addr);
1087 } else {
1088 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
1089 tags->insertBlock(pkt->getAddr(), blk, id);
1090 }
1091
1092 // starting from scratch with a new block
1093 blk->status = 0;
1094 } else {
1095 // existing block... probably an upgrade
1096 assert(blk->tag == tags->extractTag(addr));
1097 // either we're getting new data or the block should already be valid
1098 assert(pkt->hasData() || blk->isValid());
1099 // don't clear block status... if block is already dirty we
1100 // don't want to lose that
1101 }
1102
1103 blk->status |= BlkValid | BlkReadable;
1104
1105 if (!pkt->sharedAsserted()) {
1106 blk->status |= BlkWritable;
1107 // If we got this via cache-to-cache transfer (i.e., from a
1108 // cache that was an owner) and took away that owner's copy,
1109 // then we need to write it back. Normally this happens
1110 // anyway as a side effect of getting a copy to write it, but
1111 // there are cases (such as failed store conditionals or
1112 // compare-and-swaps) where we'll demand an exclusive copy but
1113 // end up not writing it.
1114 if (pkt->memInhibitAsserted())
1115 blk->status |= BlkDirty;
1116 }
1117
1118 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1119 addr, old_state, blk->status);
1120
1121 // if we got new data, copy it in
1122 if (pkt->isRead()) {
1123 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1124 }
1125
1126 blk->whenReady = pkt->finishTime;
1127
1128 return blk;
1129 }
1130
1131
1132 /////////////////////////////////////////////////////
1133 //
1134 // Snoop path: requests coming in from the memory side
1135 //
1136 /////////////////////////////////////////////////////
1137
1138 template<class TagStore>
1139 void
1140 Cache<TagStore>::
1141 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1142 bool already_copied, bool pending_inval)
1143 {
1144 // timing-mode snoop responses require a new packet, unless we
1145 // already made a copy...
1146 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1147 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1148 pkt->allocate();
1149 pkt->makeTimingResponse();
1150 if (pkt->isRead()) {
1151 pkt->setDataFromBlock(blk_data, blkSize);
1152 }
1153 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1154 // Assume we defer a response to a read from a far-away cache
1155 // A, then later defer a ReadExcl from a cache B on the same
1156 // bus as us. We'll assert MemInhibit in both cases, but in
1157 // the latter case MemInhibit will keep the invalidation from
1158 // reaching cache A. This special response tells cache A that
1159 // it gets the block to satisfy its read, but must immediately
1160 // invalidate it.
1161 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1162 }
1163 memSidePort->respond(pkt, curTick() + hitLatency);
1164 }
1165
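// Handle a snoop from the memory side: forward it to the CPU side if snoop
// forwarding is enabled, respond with data if we hold a dirty copy and a
// response is needed, and downgrade or invalidate our block as required.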
1166 template<class TagStore>
1167 void
1168 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1169 bool is_timing, bool is_deferred,
1170 bool pending_inval)
1171 {
1172 // deferred snoops can only happen in timing mode
1173 assert(!(is_deferred && !is_timing));
1174 // pending_inval only makes sense on deferred snoops
1175 assert(!(pending_inval && !is_deferred));
1176 assert(pkt->isRequest());
1177
1178 // the packet may get modified if we or a forwarded snooper
1179 // responds in atomic mode, so remember a few things about the
1180 // original packet up front
1181 bool invalidate = pkt->isInvalidate();
1182 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1183
1184 if (forwardSnoops) {
1185 // first propagate snoop upward to see if anyone above us wants to
1186 // handle it. save & restore packet src since it will get
1187 // rewritten to be relative to cpu-side bus (if any)
1188 bool alreadyResponded = pkt->memInhibitAsserted();
1189 if (is_timing) {
1190 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1191 snoopPkt->setExpressSnoop();
1192 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1193 cpuSidePort->sendTiming(snoopPkt);
1194 if (snoopPkt->memInhibitAsserted()) {
1195 // cache-to-cache response from some upper cache
1196 assert(!alreadyResponded);
1197 pkt->assertMemInhibit();
1198 } else {
1199 delete snoopPkt->senderState;
1200 }
1201 if (snoopPkt->sharedAsserted()) {
1202 pkt->assertShared();
1203 }
1204 delete snoopPkt;
1205 } else {
1206 int origSrc = pkt->getSrc();
1207 cpuSidePort->sendAtomic(pkt);
1208 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1209 // cache-to-cache response from some upper cache:
1210 // forward response to original requester
1211 assert(pkt->isResponse());
1212 }
1213 pkt->setSrc(origSrc);
1214 }
1215 }
1216
1217 if (!blk || !blk->isValid()) {
1218 return;
1219 }
1220
1221 // we may end up modifying both the block state and the packet (if
1222 // we respond in atomic mode), so just figure out what to do now
1223 // and then do it later
1224 bool respond = blk->isDirty() && pkt->needsResponse();
1225 bool have_exclusive = blk->isWritable();
1226
1227 if (pkt->isRead() && !invalidate) {
1228 assert(!needs_exclusive);
1229 pkt->assertShared();
1230 int bits_to_clear = BlkWritable;
1231 const bool haveOwnershipState = true; // for now
1232 if (!haveOwnershipState) {
1233 // if we don't support pure ownership (dirty && !writable),
1234 // have to clear dirty bit here, assume memory snarfs data
1235 // on cache-to-cache xfer
1236 bits_to_clear |= BlkDirty;
1237 }
1238 blk->status &= ~bits_to_clear;
1239 }
1240
1241 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1242 pkt->cmdString(), blockAlign(pkt->getAddr()),
1243 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1244
1245 if (respond) {
1246 assert(!pkt->memInhibitAsserted());
1247 pkt->assertMemInhibit();
1248 if (have_exclusive) {
1249 pkt->setSupplyExclusive();
1250 }
1251 if (is_timing) {
1252 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1253 } else {
1254 pkt->makeAtomicResponse();
1255 pkt->setDataFromBlock(blk->data, blkSize);
1256 }
1257 } else if (is_timing && is_deferred) {
1258 // if it's a deferred timing snoop then we've made a copy of
1259 // the packet, and so if we're not using that copy to respond
1260 // then we need to delete it here.
1261 delete pkt;
1262 }
1263
1264 // Do this last in case it deallocates block data or something
1265 // like that
1266 if (invalidate) {
1267 tags->invalidateBlk(blk);
1268 }
1269 }
1270
1271
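// Timing-mode snoop from the memory side: check in-service MSHRs and the
// writeback buffer for the snooped address before snooping the tags.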
1272 template<class TagStore>
1273 void
1274 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1275 {
1276 // Note that some deferred snoops don't have requests, since the
1277 // original access may have already completed
1278 if ((pkt->req && pkt->req->isUncacheable()) ||
1279 pkt->cmd == MemCmd::Writeback) {
1280 // Can't get a hit on an uncacheable address
1281 // Revisit this for multi-level coherence
1282 return;
1283 }
1284
1285 BlkType *blk = tags->findBlock(pkt->getAddr());
1286
1287 Addr blk_addr = blockAlign(pkt->getAddr());
1288 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1289
1290 // Let the MSHR itself track the snoop and decide whether we want
1291 // to go ahead and do the regular cache snoop
1292 if (mshr && mshr->handleSnoop(pkt, order++)) {
1293 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1294 blk_addr);
1295 if (mshr->getNumTargets() > numTarget)
1296 warn("allocating bonus target for snoop"); //handle later
1297 return;
1298 }
1299
1300 //We also need to check the writeback buffers and handle those
1301 std::vector<MSHR *> writebacks;
1302 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1303 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1304 pkt->getAddr());
1305
1306 // Look through writebacks for any non-uncacheable writes; use the first one
1307 for (int i = 0; i < writebacks.size(); i++) {
1308 mshr = writebacks[i];
1309 assert(!mshr->isUncacheable());
1310 assert(mshr->getNumTargets() == 1);
1311 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1312 assert(wb_pkt->cmd == MemCmd::Writeback);
1313
1314 assert(!pkt->memInhibitAsserted());
1315 pkt->assertMemInhibit();
1316 if (!pkt->needsExclusive()) {
1317 pkt->assertShared();
1318 // the writeback is no longer the exclusive copy in the system
1319 wb_pkt->clearSupplyExclusive();
1320 } else {
1321 // if we're not asserting the shared line, we need to
1322 // invalidate our copy. we'll do that below as long as
1323 // the packet's invalidate flag is set...
1324 assert(pkt->isInvalidate());
1325 }
1326 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1327 false, false);
1328
1329 if (pkt->isInvalidate()) {
1330 // Invalidation trumps our writeback... discard here
1331 markInService(mshr);
1332 delete wb_pkt;
1333 }
1334
1335 // If this was a shared writeback, there may still be
1336 // other shared copies above that require invalidation.
1337 // We could be more selective and return here if the
1338 // request is non-exclusive or if the writeback is
1339 // exclusive.
1340 break;
1341 }
1342 }
1343
1344 handleSnoop(pkt, blk, true, false, false);
1345 }
1346
1347
1348 template<class TagStore>
1349 Tick
1350 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1351 {
1352 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1353 // Can't get a hit on an uncacheable address
1354 // Revisit this for multi level coherence
1355 return hitLatency;
1356 }
1357
1358 BlkType *blk = tags->findBlock(pkt->getAddr());
1359 handleSnoop(pkt, blk, false, false, false);
1360 return hitLatency;
1361 }
1362
1363
1364 template<class TagStore>
1365 MSHR *
1366 Cache<TagStore>::getNextMSHR()
1367 {
1368 // Check both MSHR queue and write buffer for potential requests
1369 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1370 MSHR *write_mshr = writeBuffer.getNextMSHR();
1371
1372 // Now figure out which one to send... some cases are easy
1373 if (miss_mshr && !write_mshr) {
1374 return miss_mshr;
1375 }
1376 if (write_mshr && !miss_mshr) {
1377 return write_mshr;
1378 }
1379
1380 if (miss_mshr && write_mshr) {
1381 // We have one of each... normally we favor the miss request
1382 // unless the write buffer is full
1383 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1384 // Write buffer is full, so we'd like to issue a write;
1385 // need to search MSHR queue for conflicting earlier miss.
1386 MSHR *conflict_mshr =
1387 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1388
1389 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1390 // Service misses in order until conflict is cleared.
1391 return conflict_mshr;
1392 }
1393
1394 // No conflicts; issue write
1395 return write_mshr;
1396 }
1397
1398 // Write buffer isn't full, but need to check it for
1399 // conflicting earlier writeback
1400 MSHR *conflict_mshr =
1401 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1402 if (conflict_mshr) {
1403 // not sure why we don't check order here... it was in the
1404 // original code but commented out.
1405
1406 // The only way this happens is if we are
1407 // doing a write and we didn't have permissions
1408 // then subsequently saw a writeback (owned got evicted)
1409 // We need to make sure to perform the writeback first to preserve
1410 // the dirty data; then we can issue the write.
1411
1412 // should we return write_mshr here instead? I.e. do we
1413 // have to flush writes in order? I don't think so... not
1414 // for Alpha anyway. Maybe for x86?
1415 return conflict_mshr;
1416 }
1417
1418 // No conflicts; issue read
1419 return miss_mshr;
1420 }
1421
1422 // fall through... no pending requests. Try a prefetch.
1423 assert(!miss_mshr && !write_mshr);
1424 if (prefetcher && !mshrQueue.isFull()) {
1425 // If we have a miss queue slot, we can try a prefetch
1426 PacketPtr pkt = prefetcher->getPacket();
1427 if (pkt) {
1428 Addr pf_addr = blockAlign(pkt->getAddr());
1429 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1430 !writeBuffer.findMatch(pf_addr)) {
1431 // Update statistic on number of prefetches issued
1432 // (hwpf_mshr_misses)
1433 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1434 // Don't request bus, since we already have it
1435 return allocateMissBuffer(pkt, curTick(), false);
1436 } else {
1437 // free the request and packet
1438 delete pkt->req;
1439 delete pkt;
1440 }
1441 }
1442 }
1443
1444 return NULL;
1445 }
1446
1447
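// Select the next ready MSHR or write-buffer entry and build the packet to
// send on the memory side; returns NULL if nothing is ready or the request
// is handled without a bus transaction.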
1448 template<class TagStore>
1449 PacketPtr
1450 Cache<TagStore>::getTimingPacket()
1451 {
1452 MSHR *mshr = getNextMSHR();
1453
1454 if (mshr == NULL) {
1455 return NULL;
1456 }
1457
1458 // use request from 1st target
1459 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1460 PacketPtr pkt = NULL;
1461
1462 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1463 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1464 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1465 // in MSHR, so now that we are getting around to processing
1466 // it, just treat it as if we got a failure response
1467 pkt = new Packet(tgt_pkt);
1468 pkt->cmd = MemCmd::UpgradeFailResp;
1469 pkt->senderState = mshr;
1470 pkt->firstWordTime = pkt->finishTime = curTick();
1471 handleResponse(pkt);
1472 return NULL;
1473 } else if (mshr->isForwardNoResponse()) {
1474 // no response expected, just forward packet as it is
1475 assert(tags->findBlock(mshr->addr) == NULL);
1476 pkt = tgt_pkt;
1477 } else {
1478 BlkType *blk = tags->findBlock(mshr->addr);
1479
1480 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1481 // It might be possible for a writeback to arrive between
1482 // the time the prefetch is placed in the MSHRs and when
1483 // it's selected to send... if so, this assert will catch
1484 // that, and then we'll have to figure out what to do.
1485 assert(blk == NULL);
1486
1487 // We need to check the caches above us to verify that they don't have
1488 // a copy of this block in the dirty state at the moment. Without this
1489 // check we could get a stale copy from memory that might get used
1490 // in place of the dirty one.
1491 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1492 snoop_pkt->setExpressSnoop();
1493 snoop_pkt->senderState = mshr;
1494 cpuSidePort->sendTiming(snoop_pkt);
1495
1496 if (snoop_pkt->memInhibitAsserted()) {
1497 markInService(mshr, snoop_pkt);
1498 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1499 tgt_pkt->getAddr());
1500 delete snoop_pkt;
1501 return NULL;
1502 }
1503 delete snoop_pkt;
1504 }
1505
1506 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1507
1508 mshr->isForward = (pkt == NULL);
1509
1510 if (mshr->isForward) {
1511 // not a cache block request, but a response is expected
1512 // make copy of current packet to forward, keep current
1513 // copy for response handling
1514 pkt = new Packet(tgt_pkt);
1515 pkt->allocate();
1516 if (pkt->isWrite()) {
1517 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1518 }
1519 }
1520 }
1521
1522 assert(pkt != NULL);
1523 pkt->senderState = mshr;
1524 return pkt;
1525 }
1526
1527
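// Earliest tick at which the MSHR queue, write buffer, or prefetcher will
// have something ready to send.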
1528 template<class TagStore>
1529 Tick
1530 Cache<TagStore>::nextMSHRReadyTime()
1531 {
1532 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1533 writeBuffer.nextMSHRReadyTime());
1534
1535 if (prefetcher) {
1536 nextReady = std::min(nextReady,
1537 prefetcher->nextPrefetchReadyTime());
1538 }
1539
1540 return nextReady;
1541 }
1542
1543
1544 ///////////////
1545 //
1546 // CpuSidePort
1547 //
1548 ///////////////
1549
1550 template<class TagStore>
1551 AddrRangeList
1552 Cache<TagStore>::CpuSidePort::
1553 getAddrRanges()
1554 {
1555 // CPU side port doesn't snoop; it's a target only. It can
1556 // potentially respond to any address.
1557 AddrRangeList ranges;
1558 ranges.push_back(myCache()->getAddrRange());
1559 return ranges;
1560 }
1561
1562
1563 template<class TagStore>
1564 bool
1565 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1566 {
1567 // illegal to block responses... can lead to deadlock
1568 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1569 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1570 mustSendRetry = true;
1571 return false;
1572 }
1573
1574 myCache()->timingAccess(pkt);
1575 return true;
1576 }
1577
1578
1579 template<class TagStore>
1580 Tick
1581 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1582 {
1583 return myCache()->atomicAccess(pkt);
1584 }
1585
1586
1587 template<class TagStore>
1588 void
1589 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1590 {
1591 myCache()->functionalAccess(pkt, true);
1592 }
1593
1594
1595 template<class TagStore>
1596 Cache<TagStore>::
1597 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1598 const std::string &_label)
1599 : BaseCache::CachePort(_name, _cache, _label)
1600 {
1601 }
1602
1603 ///////////////
1604 //
1605 // MemSidePort
1606 //
1607 ///////////////
1608
1609 template<class TagStore>
1610 bool
1611 Cache<TagStore>::MemSidePort::isSnooping()
1612 {
1613 // Memory-side port always snoops, but never passes requests
1614 // through to targets on the cpu side (so we don't add anything to
1615 // the address range list).
1616 return true;
1617 }
1618
1619
1620 template<class TagStore>
1621 bool
1622 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1623 {
1624 // This needs to be fixed so that the cache updates the MSHR and resends
1625 // the packet on the link. Nacks probably won't happen in practice, so
1626 // until this is fixed, just panic when one does arrive.
1627 if (pkt->wasNacked())
1628 panic("Need to implement cache resending nacked packets!\n");
1629
1630 if (pkt->isRequest() && blocked) {
1631 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1632 mustSendRetry = true;
1633 return false;
1634 }
1635
1636 if (pkt->isResponse()) {
1637 myCache()->handleResponse(pkt);
1638 } else {
1639 myCache()->snoopTiming(pkt);
1640 }
1641 return true;
1642 }
1643
1644
1645 template<class TagStore>
1646 Tick
1647 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1648 {
1649 // in atomic mode, responses go back to the sender via the
1650 // function return from sendAtomic(), not via a separate
1651 // sendAtomic() from the responder. Thus we should never see a
1652 // response packet in recvAtomic() (anywhere, not just here).
1653 assert(!pkt->isResponse());
1654 return myCache()->snoopAtomic(pkt);
1655 }
1656
1657
1658 template<class TagStore>
1659 void
1660 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1661 {
1662 myCache()->functionalAccess(pkt, false);
1663 }
1664
1665
1666
1667 template<class TagStore>
1668 void
1669 Cache<TagStore>::MemSidePort::sendPacket()
1670 {
1671 // if we have responses that are ready, they take precedence
1672 if (deferredPacketReady()) {
1673 bool success = sendTiming(transmitList.front().pkt);
1674
1675 if (success) {
1676 //send successful, remove packet
1677 transmitList.pop_front();
1678 }
1679
1680 waitingOnRetry = !success;
1681 } else {
1682 // check for non-response packets (requests & writebacks)
1683 PacketPtr pkt = myCache()->getTimingPacket();
1684 if (pkt == NULL) {
1685 // can happen if e.g. we attempt a writeback and fail, but
1686 // before the retry, the writeback is eliminated because
1687 // we snoop another cache's ReadEx.
1688 waitingOnRetry = false;
1689 } else {
1690 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1691
1692 bool success = sendTiming(pkt);
1693
1694 waitingOnRetry = !success;
1695 if (waitingOnRetry) {
1696 DPRINTF(CachePort, "now waiting on a retry\n");
1697 if (!mshr->isForwardNoResponse()) {
1698 delete pkt;
1699 }
1700 } else {
1701 myCache()->markInService(mshr, pkt);
1702 }
1703 }
1704 }
1705
1706
1707 // tried to send packet... if it was successful (no retry), see if
1708 // we need to rerequest bus or not
1709 if (!waitingOnRetry) {
1710 Tick nextReady = std::min(deferredPacketReadyTime(),
1711 myCache()->nextMSHRReadyTime());
1712 // @TODO: need to factor in prefetch requests here somehow
1713 if (nextReady != MaxTick) {
1714 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1715 cache->schedule(sendEvent, std::max(nextReady, curTick() + 1));
1716 } else {
1717 // no more to send right now: if we're draining, we may be done
1718 if (drainEvent && !sendEvent->scheduled()) {
1719 drainEvent->process();
1720 drainEvent = NULL;
1721 }
1722 }
1723 }
1724 }
1725
1726 template<class TagStore>
1727 void
1728 Cache<TagStore>::MemSidePort::recvRetry()
1729 {
1730 assert(waitingOnRetry);
1731 sendPacket();
1732 }
1733
1734
1735 template<class TagStore>
1736 void
1737 Cache<TagStore>::MemSidePort::processSendEvent()
1738 {
1739 assert(!waitingOnRetry);
1740 sendPacket();
1741 }
1742
1743
1744 template<class TagStore>
1745 Cache<TagStore>::
1746 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1747 const std::string &_label)
1748 : BaseCache::CachePort(_name, _cache, _label)
1749 {
1750 // override default send event from SimpleTimingPort
1751 delete sendEvent;
1752 sendEvent = new SendEvent(this);
1753 }