MEM: Separate queries for snooping and address ranges
gem5: src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "debug/CachePort.hh"
59 #include "mem/cache/prefetch/base.hh"
60 #include "mem/cache/blk.hh"
61 #include "mem/cache/cache.hh"
62 #include "mem/cache/mshr.hh"
63 #include "sim/sim_exit.hh"
64
65 template<class TagStore>
66 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
67 : BaseCache(p),
68 tags(tags),
69 prefetcher(pf),
70 doFastWrites(true),
71 prefetchOnAccess(p->prefetch_on_access)
72 {
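// tempBlock provides temporary storage for handleFill() when no
// replaceable frame can be allocated, so the current request can
// still complete; it is written back (if dirty) and invalidated again
// once its targets have been serviced.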
73 tempBlock = new BlkType();
74 tempBlock->data = new uint8_t[blkSize];
75
76 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
77 "CpuSidePort");
78 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
79 "MemSidePort");
80 cpuSidePort->setOtherPort(memSidePort);
81 memSidePort->setOtherPort(cpuSidePort);
82
83 tags->setCache(this);
84 if (prefetcher)
85 prefetcher->setCache(this);
86 }
87
88 template<class TagStore>
89 void
90 Cache<TagStore>::regStats()
91 {
92 BaseCache::regStats();
93 tags->regStats(name());
94 if (prefetcher)
95 prefetcher->regStats(name());
96 }
97
98 template<class TagStore>
99 Port *
100 Cache<TagStore>::getPort(const std::string &if_name, int idx)
101 {
102 if (if_name == "" || if_name == "cpu_side") {
103 return cpuSidePort;
104 } else if (if_name == "mem_side") {
105 return memSidePort;
106 } else if (if_name == "functional") {
107 CpuSidePort *funcPort =
108 new CpuSidePort(name() + "-cpu_side_funcport", this,
109 "CpuSideFuncPort");
110 funcPort->setOtherPort(memSidePort);
111 return funcPort;
112 } else {
113 panic("Port name %s unrecognized\n", if_name);
114 }
115 }
116
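// Perform an atomic swap (or compare-and-swap) directly on the cached
// block: the packet's data is the candidate new value, the old memory
// contents are copied back into the packet, and for conditional swaps
// the write only happens if the current contents match the condition
// value carried in the request's extra data.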
117 template<class TagStore>
118 void
119 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
120 {
121 uint64_t overwrite_val;
122 bool overwrite_mem;
123 uint64_t condition_val64;
124 uint32_t condition_val32;
125
126 int offset = tags->extractBlkOffset(pkt->getAddr());
127 uint8_t *blk_data = blk->data + offset;
128
129 assert(sizeof(uint64_t) >= pkt->getSize());
130
131 overwrite_mem = true;
132 // keep a copy of our possible write value, and copy what is at the
133 // memory address into the packet
134 pkt->writeData((uint8_t *)&overwrite_val);
135 pkt->setData(blk_data);
136
137 if (pkt->req->isCondSwap()) {
138 if (pkt->getSize() == sizeof(uint64_t)) {
139 condition_val64 = pkt->req->getExtraData();
140 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
141 sizeof(uint64_t));
142 } else if (pkt->getSize() == sizeof(uint32_t)) {
143 condition_val32 = (uint32_t)pkt->req->getExtraData();
144 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
145 sizeof(uint32_t));
146 } else
147 panic("Invalid size for conditional read/write\n");
148 }
149
150 if (overwrite_mem) {
151 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
152 blk->status |= BlkDirty;
153 }
154 }
155
156
157 template<class TagStore>
158 void
159 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
160 bool deferred_response,
161 bool pending_downgrade)
162 {
163 assert(blk && blk->isValid());
164 // Occasionally this is not true... if we are a lower-level cache
165 // satisfying a string of Read and ReadEx requests from
166 // upper-level caches, a Read will mark the block as shared but we
167 // can satisfy a following ReadEx anyway since we can rely on the
168 // Read requester(s) to have buffered the ReadEx snoop and to
169 // invalidate their blocks after receiving them.
170 // assert(!pkt->needsExclusive() || blk->isWritable());
171 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
172
173 // Check RMW operations first since both isRead() and
174 // isWrite() will be true for them
175 if (pkt->cmd == MemCmd::SwapReq) {
176 cmpAndSwap(blk, pkt);
177 } else if (pkt->isWrite()) {
178 if (blk->checkWrite(pkt)) {
179 pkt->writeDataToBlock(blk->data, blkSize);
180 blk->status |= BlkDirty;
181 }
182 } else if (pkt->isRead()) {
183 if (pkt->isLLSC()) {
184 blk->trackLoadLocked(pkt);
185 }
186 pkt->setDataFromBlock(blk->data, blkSize);
187 if (pkt->getSize() == blkSize) {
188 // special handling for coherent block requests from
189 // upper-level caches
190 if (pkt->needsExclusive()) {
191 // if we have a dirty copy, make sure the recipient
192 // keeps it marked dirty
193 if (blk->isDirty()) {
194 pkt->assertMemInhibit();
195 }
196 // on ReadExReq we give up our copy unconditionally
197 tags->invalidateBlk(blk);
198 } else if (blk->isWritable() && !pending_downgrade
199 && !pkt->sharedAsserted()) {
200 // we can give the requester an exclusive copy (by not
201 // asserting shared line) on a read request if:
202 // - we have an exclusive copy at this level (& below)
203 // - we don't have a pending snoop from below
204 // signaling another read request
205 // - no other cache above has a copy (otherwise it
206 // would have asserted shared line on request)
207
208 if (blk->isDirty()) {
209 // special considerations if we're owner:
210 if (!deferred_response && !isTopLevel) {
211 // if we are responding immediately and can
212 // signal that we're transferring ownership
213 // along with exclusivity, do so
214 pkt->assertMemInhibit();
215 blk->status &= ~BlkDirty;
216 } else {
217 // if we're responding after our own miss,
218 // there's a window where the recipient didn't
219 // know it was getting ownership and may not
220 // have responded to snoops correctly, so we
221 // can't pass off ownership *or* exclusivity
222 pkt->assertShared();
223 }
224 }
225 } else {
226 // otherwise only respond with a shared copy
227 pkt->assertShared();
228 }
229 }
230 } else {
231 // Not a read or write... must be an upgrade. It's OK
232 // to just ack those as long as we have an exclusive
233 // copy at this level.
234 assert(pkt->isUpgrade());
235 tags->invalidateBlk(blk);
236 }
237 }
238
239
240 /////////////////////////////////////////////////////
241 //
242 // MSHR helper functions
243 //
244 /////////////////////////////////////////////////////
245
246
247 template<class TagStore>
248 void
249 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
250 {
251 markInServiceInternal(mshr, pkt);
252 #if 0
253 if (mshr->originalCmd == MemCmd::HardPFReq) {
254 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
255 name());
256 //Also clear pending if need be
257 if (!prefetcher->havePending())
258 {
259 deassertMemSideBusRequest(Request_PF);
260 }
261 }
262 #endif
263 }
264
265
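// Squash all MSHR targets belonging to the given thread, and clear the
// blocked state if the squashed entries were what caused the blockage.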
266 template<class TagStore>
267 void
268 Cache<TagStore>::squash(int threadNum)
269 {
270 bool unblock = false;
271 BlockedCause cause = NUM_BLOCKED_CAUSES;
272
273 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
274 noTargetMSHR = NULL;
275 unblock = true;
276 cause = Blocked_NoTargets;
277 }
278 if (mshrQueue.isFull()) {
279 unblock = true;
280 cause = Blocked_NoMSHRs;
281 }
282 mshrQueue.squash(threadNum);
283 if (unblock && !mshrQueue.isFull()) {
284 clearBlocked(cause);
285 }
286 }
287
288 /////////////////////////////////////////////////////
289 //
290 // Access path: requests coming in from the CPU side
291 //
292 /////////////////////////////////////////////////////
293
294 template<class TagStore>
295 bool
296 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
297 int &lat, PacketList &writebacks)
298 {
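// Uncacheable accesses never hit: clear LL/SC lock tracking if
// requested, invalidate any matching block, and report a miss so the
// request gets passed on below.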
299 if (pkt->req->isUncacheable()) {
300 if (pkt->req->isClearLL()) {
301 tags->clearLocks();
302 } else {
303 blk = tags->findBlock(pkt->getAddr());
304 if (blk != NULL) {
305 tags->invalidateBlk(blk);
306 }
307 }
308
309 blk = NULL;
310 lat = hitLatency;
311 return false;
312 }
313
314 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
315 blk = tags->accessBlock(pkt->getAddr(), lat, id);
316
317 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
318 pkt->req->isInstFetch() ? " (ifetch)" : "",
319 pkt->getAddr(), (blk) ? "hit" : "miss");
320
321 if (blk != NULL) {
322
323 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
324 // OK to satisfy access
325 incHitCount(pkt, id);
326 satisfyCpuSideRequest(pkt, blk);
327 return true;
328 }
329 }
330
331 // Can't satisfy access normally... either no block (blk == NULL)
332 // or have block but need exclusive & only have shared.
333
334 // Writeback handling is a special case. We can write the block
335 // into the cache without having a writable copy (or any copy at
336 // all).
337 if (pkt->cmd == MemCmd::Writeback) {
338 assert(blkSize == pkt->getSize());
339 if (blk == NULL) {
340 // need to do a replacement
341 blk = allocateBlock(pkt->getAddr(), writebacks);
342 if (blk == NULL) {
343 // no replaceable block available, give up.
344 // writeback will be forwarded to next level.
345 incMissCount(pkt, id);
346 return false;
347 }
348 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
349 tags->insertBlock(pkt->getAddr(), blk, id);
350 blk->status = BlkValid | BlkReadable;
351 }
352 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
353 blk->status |= BlkDirty;
354 if (pkt->isSupplyExclusive()) {
355 blk->status |= BlkWritable;
356 }
357 // nothing else to do; writeback doesn't expect response
358 assert(!pkt->needsResponse());
359 incHitCount(pkt, id);
360 return true;
361 }
362
363 incMissCount(pkt, id);
364
365 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
366 // complete miss on store conditional... just give up now
367 pkt->req->setExtraData(0);
368 return true;
369 }
370
371 return false;
372 }
373
374
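// Sender state attached to snoops forwarded up the hierarchy: it saves
// the original packet's senderState and source so that a resulting
// cache-to-cache response can be restored and routed back downward.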
375 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
376 {
377 Packet::SenderState *prevSenderState;
378 int prevSrc;
379 #ifndef NDEBUG
380 BaseCache *cache;
381 #endif
382 public:
383 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
384 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
385 #ifndef NDEBUG
386 , cache(_cache)
387 #endif
388 {}
389 void restore(Packet *pkt, BaseCache *_cache)
390 {
391 assert(_cache == cache);
392 pkt->senderState = prevSenderState;
393 pkt->setDest(prevSrc);
394 }
395 };
396
397
398 template<class TagStore>
399 bool
400 Cache<TagStore>::timingAccess(PacketPtr pkt)
401 {
402 //@todo Add back in MemDebug Calls
403 // MemDebug::cacheAccess(pkt);
404
405 // we charge hitLatency for doing just about anything here
406 Tick time = curTick() + hitLatency;
407
408 if (pkt->isResponse()) {
409 // must be cache-to-cache response from upper to lower level
410 ForwardResponseRecord *rec =
411 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
412
413 if (rec == NULL) {
414 assert(pkt->cmd == MemCmd::HardPFResp);
415 // Check if it's a prefetch response and handle it. We shouldn't
416 // get any other kinds of responses without FRRs.
417 DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
418 pkt->getAddr());
419 handleResponse(pkt);
420 return true;
421 }
422
423 rec->restore(pkt, this);
424 delete rec;
425 memSidePort->respond(pkt, time);
426 return true;
427 }
428
429 assert(pkt->isRequest());
430
431 if (pkt->memInhibitAsserted()) {
432 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
433 pkt->getAddr());
434 assert(!pkt->req->isUncacheable());
435 // Special tweak for multilevel coherence: snoop downward here
436 // on invalidates since there may be other caches below here
437 // that have shared copies. Not necessary if we know that
438 // supplier had exclusive copy to begin with.
439 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
440 Packet *snoopPkt = new Packet(pkt, true); // clear flags
441 snoopPkt->setExpressSnoop();
442 snoopPkt->assertMemInhibit();
443 memSidePort->sendTiming(snoopPkt);
444 // main memory will delete snoopPkt
445 }
446 // since we're the official target but we aren't responding,
447 // delete the packet now.
448 delete pkt;
449 return true;
450 }
451
452 if (pkt->req->isUncacheable()) {
453 if (pkt->req->isClearLL()) {
454 tags->clearLocks();
455 } else {
456 BlkType *blk = tags->findBlock(pkt->getAddr());
457 if (blk != NULL) {
458 tags->invalidateBlk(blk);
459 }
460 }
461
462 // writes go in write buffer, reads use MSHR
463 if (pkt->isWrite() && !pkt->isRead()) {
464 allocateWriteBuffer(pkt, time, true);
465 } else {
466 allocateUncachedReadBuffer(pkt, time, true);
467 }
468 assert(pkt->needsResponse()); // else we should delete it here??
469 return true;
470 }
471
472 int lat = hitLatency;
473 BlkType *blk = NULL;
474 PacketList writebacks;
475
476 bool satisfied = access(pkt, blk, lat, writebacks);
477
478 #if 0
479 /** @todo make the fast write alloc (wh64) work with coherence. */
480
481 // If this is a block size write/hint (WH64) allocate the block here
482 // if the coherence protocol allows it.
483 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
484 (pkt->cmd == MemCmd::WriteReq
485 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
486 // not outstanding misses, can do this
487 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
488 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
489 if (outstanding_miss) {
490 warn("WriteInv doing a fastallocate"
491 "with an outstanding miss to the same address\n");
492 }
493 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
494 writebacks);
495 ++fastWrites;
496 }
497 }
498 #endif
499
500 // track time of availability of next prefetch, if any
501 Tick next_pf_time = 0;
502
503 bool needsResponse = pkt->needsResponse();
504
505 if (satisfied) {
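// hit: let the prefetcher observe the access if configured to, or if
// this block was brought in by a prefetch (clearing that flag)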
506 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
507 if (blk)
508 blk->status &= ~BlkHWPrefetched;
509 next_pf_time = prefetcher->notify(pkt, time);
510 }
511
512 if (needsResponse) {
513 pkt->makeTimingResponse();
514 cpuSidePort->respond(pkt, curTick()+lat);
515 } else {
516 delete pkt;
517 }
518 } else {
519 // miss
520
521 Addr blk_addr = blockAlign(pkt->getAddr());
522 MSHR *mshr = mshrQueue.findMatch(blk_addr);
523
524 if (mshr) {
525 // MSHR hit
526 //@todo remove hw_pf here
527 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
528 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
529 mshr->threadNum = -1;
530 }
531 mshr->allocateTarget(pkt, time, order++);
532 if (mshr->getNumTargets() == numTarget) {
533 noTargetMSHR = mshr;
534 setBlocked(Blocked_NoTargets);
535 // need to be careful with this... if this mshr isn't
536 // ready yet (i.e. time > curTick()), we don't want to
537 // move it ahead of mshrs that are ready
538 // mshrQueue.moveToFront(mshr);
539 }
540 } else {
541 // no MSHR
542 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
543 // always mark as cache fill for now... if we implement
544 // no-write-allocate or bypass accesses this will have to
545 // be changed.
546 if (pkt->cmd == MemCmd::Writeback) {
547 allocateWriteBuffer(pkt, time, true);
548 } else {
549 if (blk && blk->isValid()) {
550 // If we have a write miss to a valid block, we
551 // need to mark the block non-readable. Otherwise
552 // if we allow reads while there's an outstanding
553 // write miss, the read could return stale data
554 // out of the cache block... a more aggressive
555 // system could detect the overlap (if any) and
556 // forward data out of the MSHRs, but we don't do
557 // that yet. Note that we do need to leave the
558 // block valid so that it stays in the cache, in
559 // case we get an upgrade response (and hence no
560 // new data) when the write miss completes.
561 // As long as CPUs do proper store/load forwarding
562 // internally, and have a sufficiently weak memory
563 // model, this is probably unnecessary, but at some
564 // point it must have seemed like we needed it...
565 assert(pkt->needsExclusive() && !blk->isWritable());
566 blk->status &= ~BlkReadable;
567 }
568
569 allocateMissBuffer(pkt, time, true);
570 }
571
572 if (prefetcher) {
573 next_pf_time = prefetcher->notify(pkt, time);
574 }
575 }
576 }
577
578 if (next_pf_time != 0)
579 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
580
581 // copy writebacks to write buffer
582 while (!writebacks.empty()) {
583 PacketPtr wbPkt = writebacks.front();
584 allocateWriteBuffer(wbPkt, time, true);
585 writebacks.pop_front();
586 }
587
588 return true;
589 }
590
591
592 // See comment in cache.hh.
593 template<class TagStore>
594 PacketPtr
595 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
596 bool needsExclusive)
597 {
598 bool blkValid = blk && blk->isValid();
599
600 if (cpu_pkt->req->isUncacheable()) {
601 //assert(blk == NULL);
602 return NULL;
603 }
604
605 if (!blkValid &&
606 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
607 // Writebacks that weren't allocated in access() and upgrades
608 // from upper-level caches that missed completely just go
609 // through.
610 return NULL;
611 }
612
613 assert(cpu_pkt->needsResponse());
614
615 MemCmd cmd;
616 // @TODO make useUpgrades a parameter.
617 // Note that ownership protocols require upgrade, otherwise a
618 // write miss on a shared owned block will generate a ReadExcl,
619 // which will clobber the owned copy.
620 const bool useUpgrades = true;
621 if (blkValid && useUpgrades) {
622 // only reason to be here is that blk is shared
623 // (read-only) and we need exclusive
624 assert(needsExclusive && !blk->isWritable());
625 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
626 } else {
627 // block is invalid
628 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
629 }
630 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
631
632 pkt->allocate();
633 return pkt;
634 }
635
636
637 template<class TagStore>
638 Tick
639 Cache<TagStore>::atomicAccess(PacketPtr pkt)
640 {
641 int lat = hitLatency;
642
643 // @TODO: make this a parameter
644 bool last_level_cache = false;
645
646 if (pkt->memInhibitAsserted()) {
647 assert(!pkt->req->isUncacheable());
648 // have to invalidate ourselves and any lower caches even if
649 // upper cache will be responding
650 if (pkt->isInvalidate()) {
651 BlkType *blk = tags->findBlock(pkt->getAddr());
652 if (blk && blk->isValid()) {
653 tags->invalidateBlk(blk);
654 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
655 pkt->cmdString(), pkt->getAddr());
656 }
657 if (!last_level_cache) {
658 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
659 pkt->cmdString(), pkt->getAddr());
660 lat += memSidePort->sendAtomic(pkt);
661 }
662 } else {
663 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
664 pkt->cmdString(), pkt->getAddr());
665 }
666
667 return lat;
668 }
669
670 // should assert here that there are no outstanding MSHRs or
671 // writebacks... that would mean that someone used an atomic
672 // access in timing mode
673
674 BlkType *blk = NULL;
675 PacketList writebacks;
676
677 if (!access(pkt, blk, lat, writebacks)) {
678 // MISS
679 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
680
681 bool is_forward = (bus_pkt == NULL);
682
683 if (is_forward) {
684 // just forwarding the same request to the next level
685 // no local cache operation involved
686 bus_pkt = pkt;
687 }
688
689 DPRINTF(Cache, "Sending an atomic %s for %x\n",
690 bus_pkt->cmdString(), bus_pkt->getAddr());
691
692 #if TRACING_ON
693 CacheBlk::State old_state = blk ? blk->status : 0;
694 #endif
695
696 lat += memSidePort->sendAtomic(bus_pkt);
697
698 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
699 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
700
701 assert(!bus_pkt->wasNacked());
702
703 // If packet was a forward, the response (if any) is already
704 // in place in the bus_pkt == pkt structure, so we don't need
705 // to do anything. Otherwise, use the separate bus_pkt to
706 // generate response to pkt and then delete it.
707 if (!is_forward) {
708 if (pkt->needsResponse()) {
709 assert(bus_pkt->isResponse());
710 if (bus_pkt->isError()) {
711 pkt->makeAtomicResponse();
712 pkt->copyError(bus_pkt);
713 } else if (bus_pkt->isRead() ||
714 bus_pkt->cmd == MemCmd::UpgradeResp) {
715 // we're updating cache state to allow us to
716 // satisfy the upstream request from the cache
717 blk = handleFill(bus_pkt, blk, writebacks);
718 satisfyCpuSideRequest(pkt, blk);
719 } else {
720 // we're satisfying the upstream request without
721 // modifying cache state, e.g., a write-through
722 pkt->makeAtomicResponse();
723 }
724 }
725 delete bus_pkt;
726 }
727 }
728
729 // Note that we don't invoke the prefetcher at all in atomic mode.
730 // It's not clear how to do it properly, particularly for
731 // prefetchers that aggressively generate prefetch candidates and
732 // rely on bandwidth contention to throttle them; these will tend
733 // to pollute the cache in atomic mode since there is no bandwidth
734 // contention. If we ever do want to enable prefetching in atomic
735 // mode, though, this is the place to do it... see timingAccess()
736 // for an example (though we'd want to issue the prefetch(es)
737 // immediately rather than calling requestMemSideBus() as we do
738 // there).
739
740 // Handle writebacks if needed
741 while (!writebacks.empty()){
742 PacketPtr wbPkt = writebacks.front();
743 memSidePort->sendAtomic(wbPkt);
744 writebacks.pop_front();
745 delete wbPkt;
746 }
747
748 // We now have the block one way or another (hit or completed miss)
749
750 if (pkt->needsResponse()) {
751 pkt->makeAtomicResponse();
752 }
753
754 return lat;
755 }
756
757
758 template<class TagStore>
759 void
760 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
761 {
762 Addr blk_addr = blockAlign(pkt->getAddr());
763 BlkType *blk = tags->findBlock(pkt->getAddr());
764 MSHR *mshr = mshrQueue.findMatch(blk_addr);
765
766 pkt->pushLabel(name());
767
768 CacheBlkPrintWrapper cbpw(blk);
769
770 // Note that just because an L2/L3 has valid data doesn't mean an
771 // L1 doesn't have a more up-to-date modified copy that still
772 // needs to be found. As a result we always update the request if
773 // we have it, but only declare it satisfied if we are the owner.
774
775 // see if we have data at all (owned or otherwise)
776 bool have_data = blk && blk->isValid()
777 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
778
779 // data we have is dirty if marked as such or if valid & ownership
780 // pending due to outstanding UpgradeReq
781 bool have_dirty =
782 have_data && (blk->isDirty() ||
783 (mshr && mshr->inService && mshr->isPendingDirty()));
784
785 bool done = have_dirty
786 || cpuSidePort->checkFunctional(pkt)
787 || mshrQueue.checkFunctional(pkt, blk_addr)
788 || writeBuffer.checkFunctional(pkt, blk_addr)
789 || memSidePort->checkFunctional(pkt);
790
791 DPRINTF(Cache, "functional %s %x %s%s%s\n",
792 pkt->cmdString(), pkt->getAddr(),
793 (blk && blk->isValid()) ? "valid " : "",
794 have_data ? "data " : "", done ? "done " : "");
795
796 // We're leaving the cache, so pop cache->name() label
797 pkt->popLabel();
798
799 if (done) {
800 pkt->makeResponse();
801 } else {
802 // if it came as a request from the CPU side then make sure it
803 // continues towards the memory side
804 if (fromCpuSide) {
805 memSidePort->sendFunctional(pkt);
806 } else if (forwardSnoops) {
807 // if it came from the memory side, it must be a snoop request
808 // and we should only forward it if we are forwarding snoops
809 cpuSidePort->sendFunctional(pkt);
810 }
811 }
812 }
813
814
815 /////////////////////////////////////////////////////
816 //
817 // Response handling: responses from the memory side
818 //
819 /////////////////////////////////////////////////////
820
821
822 template<class TagStore>
823 void
824 Cache<TagStore>::handleResponse(PacketPtr pkt)
825 {
826 Tick time = curTick() + hitLatency;
827 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
828 bool is_error = pkt->isError();
829
830 assert(mshr);
831
832 if (pkt->wasNacked()) {
833 //pkt->reinitFromRequest();
834 warn("NACKs from devices not connected to the same bus "
835 "not implemented\n");
836 return;
837 }
838 if (is_error) {
839 DPRINTF(Cache, "Cache received packet with error for address %x, "
840 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
841 }
842
843 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
844
845 MSHRQueue *mq = mshr->queue;
846 bool wasFull = mq->isFull();
847
848 if (mshr == noTargetMSHR) {
849 // we always clear at least one target
850 clearBlocked(Blocked_NoTargets);
851 noTargetMSHR = NULL;
852 }
853
854 // Initial target is used just for stats
855 MSHR::Target *initial_tgt = mshr->getTarget();
856 BlkType *blk = tags->findBlock(pkt->getAddr());
857 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
858 Tick miss_latency = curTick() - initial_tgt->recvTime;
859 PacketList writebacks;
860
861 if (pkt->req->isUncacheable()) {
862 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
863 miss_latency;
864 } else {
865 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
866 miss_latency;
867 }
868
869 bool is_fill = !mshr->isForward &&
870 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
871
872 if (is_fill && !is_error) {
873 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
874 pkt->getAddr());
875
876 // give mshr a chance to do some dirty work
877 mshr->handleFill(pkt, blk);
878
879 blk = handleFill(pkt, blk, writebacks);
880 assert(blk != NULL);
881 }
882
883 // First offset for critical word first calculations
884 int initial_offset = 0;
885
886 if (mshr->hasTargets()) {
887 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
888 }
889
890 while (mshr->hasTargets()) {
891 MSHR::Target *target = mshr->getTarget();
892
893 switch (target->source) {
894 case MSHR::Target::FromCPU:
895 Tick completion_time;
896 if (is_fill) {
897 satisfyCpuSideRequest(target->pkt, blk,
898 true, mshr->hasPostDowngrade());
899 // How many bytes past the first request is this one
900 int transfer_offset =
901 target->pkt->getOffset(blkSize) - initial_offset;
902 if (transfer_offset < 0) {
903 transfer_offset += blkSize;
904 }
905
906 // If critical word (no offset) return first word time
907 completion_time = tags->getHitLatency() +
908 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
909
910 assert(!target->pkt->req->isUncacheable());
911 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
912 completion_time - target->recvTime;
913 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
914 // failed StoreCond upgrade
915 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
916 target->pkt->cmd == MemCmd::StoreCondFailReq ||
917 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
918 completion_time = tags->getHitLatency() + pkt->finishTime;
919 target->pkt->req->setExtraData(0);
920 } else {
921 // not a cache fill, just forwarding response
922 completion_time = tags->getHitLatency() + pkt->finishTime;
923 if (pkt->isRead() && !is_error) {
924 target->pkt->setData(pkt->getPtr<uint8_t>());
925 }
926 }
927 target->pkt->makeTimingResponse();
928 // if this packet is an error copy that to the new packet
929 if (is_error)
930 target->pkt->copyError(pkt);
931 if (target->pkt->cmd == MemCmd::ReadResp &&
932 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
933 // If intermediate cache got ReadRespWithInvalidate,
934 // propagate that. Response should not have
935 // isInvalidate() set otherwise.
936 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
937 }
938 cpuSidePort->respond(target->pkt, completion_time);
939 break;
940
941 case MSHR::Target::FromPrefetcher:
942 assert(target->pkt->cmd == MemCmd::HardPFReq);
943 if (blk)
944 blk->status |= BlkHWPrefetched;
945 delete target->pkt->req;
946 delete target->pkt;
947 break;
948
949 case MSHR::Target::FromSnoop:
950 // I don't believe that a snoop can be in an error state
951 assert(!is_error);
952 // response to snoop request
953 DPRINTF(Cache, "processing deferred snoop...\n");
954 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
955 handleSnoop(target->pkt, blk, true, true,
956 mshr->hasPostInvalidate());
957 break;
958
959 default:
960 panic("Illegal target->source enum %d\n", target->source);
961 }
962
963 mshr->popTarget();
964 }
965
966 if (blk) {
967 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
968 tags->invalidateBlk(blk);
969 } else if (mshr->hasPostDowngrade()) {
970 blk->status &= ~BlkWritable;
971 }
972 }
973
974 if (mshr->promoteDeferredTargets()) {
975 // avoid later read getting stale data while write miss is
976 // outstanding.. see comment in timingAccess()
977 if (blk) {
978 blk->status &= ~BlkReadable;
979 }
980 MSHRQueue *mq = mshr->queue;
981 mq->markPending(mshr);
982 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
983 } else {
984 mq->deallocate(mshr);
985 if (wasFull && !mq->isFull()) {
986 clearBlocked((BlockedCause)mq->index);
987 }
988 }
989
990 // copy writebacks to write buffer
991 while (!writebacks.empty()) {
992 PacketPtr wbPkt = writebacks.front();
993 allocateWriteBuffer(wbPkt, time, true);
994 writebacks.pop_front();
995 }
996 // if we used temp block, clear it out
997 if (blk == tempBlock) {
998 if (blk->isDirty()) {
999 allocateWriteBuffer(writebackBlk(blk), time, true);
1000 }
1001 tags->invalidateBlk(blk);
1002 }
1003
1004 delete pkt;
1005 }
1006
1007
1008
1009
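// Build a Writeback packet for a dirty block: copy out its data, mark
// the packet as supplying an exclusive copy if the block is writable,
// and clear the block's dirty bit since the packet now carries the
// up-to-date data.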
1010 template<class TagStore>
1011 PacketPtr
1012 Cache<TagStore>::writebackBlk(BlkType *blk)
1013 {
1014 assert(blk && blk->isValid() && blk->isDirty());
1015
1016 writebacks[0/*pkt->req->threadId()*/]++;
1017
1018 Request *writebackReq =
1019 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
1020 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
1021 if (blk->isWritable()) {
1022 writeback->setSupplyExclusive();
1023 }
1024 writeback->allocate();
1025 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1026
1027 blk->status &= ~BlkDirty;
1028 return writeback;
1029 }
1030
1031
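// Select a victim frame for addr: give up (return NULL) if the victim
// has an outstanding upgrade MSHR, otherwise queue a writeback packet
// for a dirty victim and hand the frame back to the caller for
// insertion.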
1032 template<class TagStore>
1033 typename Cache<TagStore>::BlkType*
1034 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1035 {
1036 BlkType *blk = tags->findVictim(addr, writebacks);
1037
1038 if (blk->isValid()) {
1039 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1040 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1041 if (repl_mshr) {
1042 // must be an outstanding upgrade request on block
1043 // we're about to replace...
1044 assert(!blk->isWritable());
1045 assert(repl_mshr->needsExclusive());
1046 // too hard to replace block with transient state
1047 // allocation failed, block not inserted
1048 return NULL;
1049 } else {
1050 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1051 repl_addr, addr,
1052 blk->isDirty() ? "writeback" : "clean");
1053
1054 if (blk->isDirty()) {
1055 // Save writeback packet for handling by caller
1056 writebacks.push_back(writebackBlk(blk));
1057 }
1058 }
1059 }
1060
1061 return blk;
1062 }
1063
1064
1065 // Note that the reason we return a list of writebacks rather than
1066 // inserting them directly in the write buffer is that this function
1067 // is called by both atomic and timing-mode accesses, and in atomic
1068 // mode we don't mess with the write buffer (we just perform the
1069 // writebacks atomically once the original request is complete).
1070 template<class TagStore>
1071 typename Cache<TagStore>::BlkType*
1072 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1073 PacketList &writebacks)
1074 {
1075 Addr addr = pkt->getAddr();
1076 #if TRACING_ON
1077 CacheBlk::State old_state = blk ? blk->status : 0;
1078 #endif
1079
1080 if (blk == NULL) {
1081 // better have read new data...
1082 assert(pkt->hasData());
1083 // need to do a replacement
1084 blk = allocateBlock(addr, writebacks);
1085 if (blk == NULL) {
1086 // No replaceable block... just use temporary storage to
1087 // complete the current request and then get rid of it
1088 assert(!tempBlock->isValid());
1089 blk = tempBlock;
1090 tempBlock->set = tags->extractSet(addr);
1091 tempBlock->tag = tags->extractTag(addr);
1092 DPRINTF(Cache, "using temp block for %x\n", addr);
1093 } else {
1094 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
1095 tags->insertBlock(pkt->getAddr(), blk, id);
1096 }
1097
1098 // starting from scratch with a new block
1099 blk->status = 0;
1100 } else {
1101 // existing block... probably an upgrade
1102 assert(blk->tag == tags->extractTag(addr));
1103 // either we're getting new data or the block should already be valid
1104 assert(pkt->hasData() || blk->isValid());
1105 // don't clear block status... if block is already dirty we
1106 // don't want to lose that
1107 }
1108
1109 blk->status |= BlkValid | BlkReadable;
1110
1111 if (!pkt->sharedAsserted()) {
1112 blk->status |= BlkWritable;
1113 // If we got this via cache-to-cache transfer (i.e., from a
1114 // cache that was an owner) and took away that owner's copy,
1115 // then we need to write it back. Normally this happens
1116 // anyway as a side effect of getting a copy to write it, but
1117 // there are cases (such as failed store conditionals or
1118 // compare-and-swaps) where we'll demand an exclusive copy but
1119 // end up not writing it.
1120 if (pkt->memInhibitAsserted())
1121 blk->status |= BlkDirty;
1122 }
1123
1124 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1125 addr, old_state, blk->status);
1126
1127 // if we got new data, copy it in
1128 if (pkt->isRead()) {
1129 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1130 }
1131
1132 blk->whenReady = pkt->finishTime;
1133
1134 return blk;
1135 }
1136
1137
1138 /////////////////////////////////////////////////////
1139 //
1140 // Snoop path: requests coming in from the memory side
1141 //
1142 /////////////////////////////////////////////////////
1143
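// Supply data for a snoop in timing mode: reuse req_pkt if handleSnoop
// already made a private copy (deferred snoop), otherwise respond with
// a freshly allocated packet.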
1144 template<class TagStore>
1145 void
1146 Cache<TagStore>::
1147 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1148 bool already_copied, bool pending_inval)
1149 {
1150 // timing-mode snoop responses require a new packet, unless we
1151 // already made a copy...
1152 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1153 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1154 pkt->allocate();
1155 pkt->makeTimingResponse();
1156 if (pkt->isRead()) {
1157 pkt->setDataFromBlock(blk_data, blkSize);
1158 }
1159 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1160 // Assume we defer a response to a read from a far-away cache
1161 // A, then later defer a ReadExcl from a cache B on the same
1162 // bus as us. We'll assert MemInhibit in both cases, but in
1163 // the latter case MemInhibit will keep the invalidation from
1164 // reaching cache A. This special response tells cache A that
1165 // it gets the block to satisfy its read, but must immediately
1166 // invalidate it.
1167 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1168 }
1169 memSidePort->respond(pkt, curTick() + hitLatency);
1170 }
1171
1172 template<class TagStore>
1173 void
1174 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1175 bool is_timing, bool is_deferred,
1176 bool pending_inval)
1177 {
1178 // deferred snoops can only happen in timing mode
1179 assert(!(is_deferred && !is_timing));
1180 // pending_inval only makes sense on deferred snoops
1181 assert(!(pending_inval && !is_deferred));
1182 assert(pkt->isRequest());
1183
1184 // the packet may get modified if we or a forwarded snooper
1185 // responds in atomic mode, so remember a few things about the
1186 // original packet up front
1187 bool invalidate = pkt->isInvalidate();
1188 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1189
1190 if (forwardSnoops) {
1191 // first propagate snoop upward to see if anyone above us wants to
1192 // handle it. save & restore packet src since it will get
1193 // rewritten to be relative to cpu-side bus (if any)
1194 bool alreadyResponded = pkt->memInhibitAsserted();
1195 if (is_timing) {
1196 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1197 snoopPkt->setExpressSnoop();
1198 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1199 cpuSidePort->sendTiming(snoopPkt);
1200 if (snoopPkt->memInhibitAsserted()) {
1201 // cache-to-cache response from some upper cache
1202 assert(!alreadyResponded);
1203 pkt->assertMemInhibit();
1204 } else {
1205 delete snoopPkt->senderState;
1206 }
1207 if (snoopPkt->sharedAsserted()) {
1208 pkt->assertShared();
1209 }
1210 delete snoopPkt;
1211 } else {
1212 int origSrc = pkt->getSrc();
1213 cpuSidePort->sendAtomic(pkt);
1214 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1215 // cache-to-cache response from some upper cache:
1216 // forward response to original requester
1217 assert(pkt->isResponse());
1218 }
1219 pkt->setSrc(origSrc);
1220 }
1221 }
1222
1223 if (!blk || !blk->isValid()) {
1224 return;
1225 }
1226
1227 // we may end up modifying both the block state and the packet (if
1228 // we respond in atomic mode), so just figure out what to do now
1229 // and then do it later
1230 bool respond = blk->isDirty() && pkt->needsResponse();
1231 bool have_exclusive = blk->isWritable();
1232
1233 if (pkt->isRead() && !invalidate) {
1234 assert(!needs_exclusive);
1235 pkt->assertShared();
1236 int bits_to_clear = BlkWritable;
1237 const bool haveOwnershipState = true; // for now
1238 if (!haveOwnershipState) {
1239 // if we don't support pure ownership (dirty && !writable),
1240 // have to clear dirty bit here, assume memory snarfs data
1241 // on cache-to-cache xfer
1242 bits_to_clear |= BlkDirty;
1243 }
1244 blk->status &= ~bits_to_clear;
1245 }
1246
1247 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1248 pkt->cmdString(), blockAlign(pkt->getAddr()),
1249 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1250
1251 if (respond) {
1252 assert(!pkt->memInhibitAsserted());
1253 pkt->assertMemInhibit();
1254 if (have_exclusive) {
1255 pkt->setSupplyExclusive();
1256 }
1257 if (is_timing) {
1258 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1259 } else {
1260 pkt->makeAtomicResponse();
1261 pkt->setDataFromBlock(blk->data, blkSize);
1262 }
1263 } else if (is_timing && is_deferred) {
1264 // if it's a deferred timing snoop then we've made a copy of
1265 // the packet, and so if we're not using that copy to respond
1266 // then we need to delete it here.
1267 delete pkt;
1268 }
1269
1270 // Do this last in case it deallocates block data or something
1271 // like that
1272 if (invalidate) {
1273 tags->invalidateBlk(blk);
1274 }
1275 }
1276
1277
1278 template<class TagStore>
1279 void
1280 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1281 {
1282 // Note that some deferred snoops don't have requests, since the
1283 // original access may have already completed
1284 if ((pkt->req && pkt->req->isUncacheable()) ||
1285 pkt->cmd == MemCmd::Writeback) {
1286 //Can't get a hit on an uncacheable address
1287 //Revisit this for multi level coherence
1288 return;
1289 }
1290
1291 BlkType *blk = tags->findBlock(pkt->getAddr());
1292
1293 Addr blk_addr = blockAlign(pkt->getAddr());
1294 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1295
1296 // Let the MSHR itself track the snoop and decide whether we want
1297 // to go ahead and do the regular cache snoop
1298 if (mshr && mshr->handleSnoop(pkt, order++)) {
1299 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1300 blk_addr);
1301 if (mshr->getNumTargets() > numTarget)
1302 warn("allocating bonus target for snoop"); //handle later
1303 return;
1304 }
1305
1306 //We also need to check the writeback buffers and handle those
1307 std::vector<MSHR *> writebacks;
1308 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1309 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1310 pkt->getAddr());
1311
1312 //Look through writebacks for any cacheable writes, use that
1313 for (int i = 0; i < writebacks.size(); i++) {
1314 mshr = writebacks[i];
1315 assert(!mshr->isUncacheable());
1316 assert(mshr->getNumTargets() == 1);
1317 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1318 assert(wb_pkt->cmd == MemCmd::Writeback);
1319
1320 assert(!pkt->memInhibitAsserted());
1321 pkt->assertMemInhibit();
1322 if (!pkt->needsExclusive()) {
1323 pkt->assertShared();
1324 // the writeback is no longer the exclusive copy in the system
1325 wb_pkt->clearSupplyExclusive();
1326 } else {
1327 // if we're not asserting the shared line, we need to
1328 // invalidate our copy. we'll do that below as long as
1329 // the packet's invalidate flag is set...
1330 assert(pkt->isInvalidate());
1331 }
1332 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1333 false, false);
1334
1335 if (pkt->isInvalidate()) {
1336 // Invalidation trumps our writeback... discard here
1337 markInService(mshr);
1338 delete wb_pkt;
1339 }
1340
1341 // If this was a shared writeback, there may still be
1342 // other shared copies above that require invalidation.
1343 // We could be more selective and return here if the
1344 // request is non-exclusive or if the writeback is
1345 // exclusive.
1346 break;
1347 }
1348 }
1349
1350 handleSnoop(pkt, blk, true, false, false);
1351 }
1352
1353
1354 template<class TagStore>
1355 Tick
1356 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1357 {
1358 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1359 // Can't get a hit on an uncacheable address
1360 // Revisit this for multi level coherence
1361 return hitLatency;
1362 }
1363
1364 BlkType *blk = tags->findBlock(pkt->getAddr());
1365 handleSnoop(pkt, blk, false, false, false);
1366 return hitLatency;
1367 }
1368
1369
1370 template<class TagStore>
1371 MSHR *
1372 Cache<TagStore>::getNextMSHR()
1373 {
1374 // Check both MSHR queue and write buffer for potential requests
1375 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1376 MSHR *write_mshr = writeBuffer.getNextMSHR();
1377
1378 // Now figure out which one to send... some cases are easy
1379 if (miss_mshr && !write_mshr) {
1380 return miss_mshr;
1381 }
1382 if (write_mshr && !miss_mshr) {
1383 return write_mshr;
1384 }
1385
1386 if (miss_mshr && write_mshr) {
1387 // We have one of each... normally we favor the miss request
1388 // unless the write buffer is full
1389 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1390 // Write buffer is full, so we'd like to issue a write;
1391 // need to search MSHR queue for conflicting earlier miss.
1392 MSHR *conflict_mshr =
1393 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1394
1395 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1396 // Service misses in order until conflict is cleared.
1397 return conflict_mshr;
1398 }
1399
1400 // No conflicts; issue write
1401 return write_mshr;
1402 }
1403
1404 // Write buffer isn't full, but need to check it for
1405 // conflicting earlier writeback
1406 MSHR *conflict_mshr =
1407 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1408 if (conflict_mshr) {
1409 // not sure why we don't check order here... it was in the
1410 // original code but commented out.
1411
1412 // The only way this happens is if we are doing a write
1413 // and we didn't have permissions, then subsequently saw
1414 // a writeback (the owned copy got evicted). We need to
1415 // perform the writeback first to preserve the dirty
1416 // data, then we can issue the write.
1417
1418 // should we return write_mshr here instead? I.e. do we
1419 // have to flush writes in order? I don't think so... not
1420 // for Alpha anyway. Maybe for x86?
1421 return conflict_mshr;
1422 }
1423
1424 // No conflicts; issue read
1425 return miss_mshr;
1426 }
1427
1428 // fall through... no pending requests. Try a prefetch.
1429 assert(!miss_mshr && !write_mshr);
1430 if (prefetcher && !mshrQueue.isFull()) {
1431 // If we have a miss queue slot, we can try a prefetch
1432 PacketPtr pkt = prefetcher->getPacket();
1433 if (pkt) {
1434 Addr pf_addr = blockAlign(pkt->getAddr());
1435 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1436 !writeBuffer.findMatch(pf_addr)) {
1437 // Update statistic on number of prefetches issued
1438 // (hwpf_mshr_misses)
1439 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1440 // Don't request bus, since we already have it
1441 return allocateMissBuffer(pkt, curTick(), false);
1442 } else {
1443 // free the request and packet
1444 delete pkt->req;
1445 delete pkt;
1446 }
1447 }
1448 }
1449
1450 return NULL;
1451 }
1452
1453
1454 template<class TagStore>
1455 PacketPtr
1456 Cache<TagStore>::getTimingPacket()
1457 {
1458 MSHR *mshr = getNextMSHR();
1459
1460 if (mshr == NULL) {
1461 return NULL;
1462 }
1463
1464 // use request from 1st target
1465 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1466 PacketPtr pkt = NULL;
1467
1468 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1469 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1470 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1471 // in MSHR, so now that we are getting around to processing
1472 // it, just treat it as if we got a failure response
1473 pkt = new Packet(tgt_pkt);
1474 pkt->cmd = MemCmd::UpgradeFailResp;
1475 pkt->senderState = mshr;
1476 pkt->firstWordTime = pkt->finishTime = curTick();
1477 handleResponse(pkt);
1478 return NULL;
1479 } else if (mshr->isForwardNoResponse()) {
1480 // no response expected, just forward packet as it is
1481 assert(tags->findBlock(mshr->addr) == NULL);
1482 pkt = tgt_pkt;
1483 } else {
1484 BlkType *blk = tags->findBlock(mshr->addr);
1485
1486 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1487 // It might be possible for a writeback to arrive between
1488 // the time the prefetch is placed in the MSHRs and when
1489 // it's selected to send... if so, this assert will catch
1490 // that, and then we'll have to figure out what to do.
1491 assert(blk == NULL);
1492
1493 // We need to check the caches above us to verify that they don't have
1494 // a copy of this block in the dirty state at the moment. Without this
1495 // check we could get a stale copy from memory that might get used
1496 // in place of the dirty one.
1497 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1498 snoop_pkt->setExpressSnoop();
1499 snoop_pkt->senderState = mshr;
1500 cpuSidePort->sendTiming(snoop_pkt);
1501
1502 if (snoop_pkt->memInhibitAsserted()) {
1503 markInService(mshr, snoop_pkt);
1504 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1505 tgt_pkt->getAddr());
1506 delete snoop_pkt;
1507 return NULL;
1508 }
1509 delete snoop_pkt;
1510 }
1511
1512 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1513
1514 mshr->isForward = (pkt == NULL);
1515
1516 if (mshr->isForward) {
1517 // not a cache block request, but a response is expected
1518 // make copy of current packet to forward, keep current
1519 // copy for response handling
1520 pkt = new Packet(tgt_pkt);
1521 pkt->allocate();
1522 if (pkt->isWrite()) {
1523 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1524 }
1525 }
1526 }
1527
1528 assert(pkt != NULL);
1529 pkt->senderState = mshr;
1530 return pkt;
1531 }
1532
1533
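// Earliest tick at which the MSHR queue, the write buffer, or (if
// present) the prefetcher has something ready to send.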
1534 template<class TagStore>
1535 Tick
1536 Cache<TagStore>::nextMSHRReadyTime()
1537 {
1538 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1539 writeBuffer.nextMSHRReadyTime());
1540
1541 if (prefetcher) {
1542 nextReady = std::min(nextReady,
1543 prefetcher->nextPrefetchReadyTime());
1544 }
1545
1546 return nextReady;
1547 }
1548
1549
1550 ///////////////
1551 //
1552 // CpuSidePort
1553 //
1554 ///////////////
1555
1556 template<class TagStore>
1557 AddrRangeList
1558 Cache<TagStore>::CpuSidePort::
1559 getAddrRanges()
1560 {
1561 // CPU side port doesn't snoop; it's a target only. It can
1562 // potentially respond to any address.
1563 AddrRangeList ranges;
1564 ranges.push_back(myCache()->getAddrRange());
1565 return ranges;
1566 }
1567
1568
1569 template<class TagStore>
1570 bool
1571 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1572 {
1573 // illegal to block responses... can lead to deadlock
1574 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1575 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1576 mustSendRetry = true;
1577 return false;
1578 }
1579
1580 myCache()->timingAccess(pkt);
1581 return true;
1582 }
1583
1584
1585 template<class TagStore>
1586 Tick
1587 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1588 {
1589 return myCache()->atomicAccess(pkt);
1590 }
1591
1592
1593 template<class TagStore>
1594 void
1595 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1596 {
1597 myCache()->functionalAccess(pkt, true);
1598 }
1599
1600
1601 template<class TagStore>
1602 Cache<TagStore>::
1603 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1604 const std::string &_label)
1605 : BaseCache::CachePort(_name, _cache, _label)
1606 {
1607 }
1608
1609 ///////////////
1610 //
1611 // MemSidePort
1612 //
1613 ///////////////
1614
1615 template<class TagStore>
1616 bool
1617 Cache<TagStore>::MemSidePort::isSnooping()
1618 {
1619 // Memory-side port always snoops, but never passes requests
1620 // through to targets on the cpu side (so we don't add anything to
1621 // the address range list).
1622 return true;
1623 }
1624
1625
1626 template<class TagStore>
1627 bool
1628 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1629 {
1630 // this needs to be fixed so that the cache updates the mshr and sends the
1631 // packet back out on the link, but it probably won't happen so until this
1632 // gets fixed, just panic when it does
1633 if (pkt->wasNacked())
1634 panic("Need to implement cache resending nacked packets!\n");
1635
1636 if (pkt->isRequest() && blocked) {
1637 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1638 mustSendRetry = true;
1639 return false;
1640 }
1641
1642 if (pkt->isResponse()) {
1643 myCache()->handleResponse(pkt);
1644 } else {
1645 myCache()->snoopTiming(pkt);
1646 }
1647 return true;
1648 }
1649
1650
1651 template<class TagStore>
1652 Tick
1653 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1654 {
1655 // in atomic mode, responses go back to the sender via the
1656 // function return from sendAtomic(), not via a separate
1657 // sendAtomic() from the responder. Thus we should never see a
1658 // response packet in recvAtomic() (anywhere, not just here).
1659 assert(!pkt->isResponse());
1660 return myCache()->snoopAtomic(pkt);
1661 }
1662
1663
1664 template<class TagStore>
1665 void
1666 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1667 {
1668 myCache()->functionalAccess(pkt, false);
1669 }
1670
1671
1672
1673 template<class TagStore>
1674 void
1675 Cache<TagStore>::MemSidePort::sendPacket()
1676 {
1677 // if we have responses that are ready, they take precedence
1678 if (deferredPacketReady()) {
1679 bool success = sendTiming(transmitList.front().pkt);
1680
1681 if (success) {
1682 //send successful, remove packet
1683 transmitList.pop_front();
1684 }
1685
1686 waitingOnRetry = !success;
1687 } else {
1688 // check for non-response packets (requests & writebacks)
1689 PacketPtr pkt = myCache()->getTimingPacket();
1690 if (pkt == NULL) {
1691 // can happen if e.g. we attempt a writeback and fail, but
1692 // before the retry, the writeback is eliminated because
1693 // we snoop another cache's ReadEx.
1694 waitingOnRetry = false;
1695 } else {
1696 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1697
1698 bool success = sendTiming(pkt);
1699
1700 waitingOnRetry = !success;
1701 if (waitingOnRetry) {
1702 DPRINTF(CachePort, "now waiting on a retry\n");
1703 if (!mshr->isForwardNoResponse()) {
1704 delete pkt;
1705 }
1706 } else {
1707 myCache()->markInService(mshr, pkt);
1708 }
1709 }
1710 }
1711
1712
1713 // tried to send packet... if it was successful (no retry), see if
1714 // we need to rerequest bus or not
1715 if (!waitingOnRetry) {
1716 Tick nextReady = std::min(deferredPacketReadyTime(),
1717 myCache()->nextMSHRReadyTime());
1718 // @TODO: need to factor in prefetch requests here somehow
1719 if (nextReady != MaxTick) {
1720 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1721 cache->schedule(sendEvent, std::max(nextReady, curTick() + 1));
1722 } else {
1723 // no more to send right now: if we're draining, we may be done
1724 if (drainEvent && !sendEvent->scheduled()) {
1725 drainEvent->process();
1726 drainEvent = NULL;
1727 }
1728 }
1729 }
1730 }
1731
1732 template<class TagStore>
1733 void
1734 Cache<TagStore>::MemSidePort::recvRetry()
1735 {
1736 assert(waitingOnRetry);
1737 sendPacket();
1738 }
1739
1740
1741 template<class TagStore>
1742 void
1743 Cache<TagStore>::MemSidePort::processSendEvent()
1744 {
1745 assert(!waitingOnRetry);
1746 sendPacket();
1747 }
1748
1749
1750 template<class TagStore>
1751 Cache<TagStore>::
1752 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1753 const std::string &_label)
1754 : BaseCache::CachePort(_name, _cache, _label)
1755 {
1756 // override default send event from SimpleTimingPort
1757 delete sendEvent;
1758 sendEvent = new SendEvent(this);
1759 }