src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "debug/CachePort.hh"
59 #include "mem/cache/prefetch/base.hh"
60 #include "mem/cache/blk.hh"
61 #include "mem/cache/cache.hh"
62 #include "mem/cache/mshr.hh"
63 #include "sim/sim_exit.hh"
64
65 template<class TagStore>
66 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
67 : BaseCache(p),
68 tags(tags),
69 prefetcher(pf),
70 doFastWrites(true),
71 prefetchOnAccess(p->prefetch_on_access)
72 {
73 tempBlock = new BlkType();
74 tempBlock->data = new uint8_t[blkSize];
75
76 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
77 "CpuSidePort");
78 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
79 "MemSidePort");
80
81 tags->setCache(this);
82 if (prefetcher)
83 prefetcher->setCache(this);
84 }
85
86 template<class TagStore>
87 void
88 Cache<TagStore>::regStats()
89 {
90 BaseCache::regStats();
91 tags->regStats(name());
92 if (prefetcher)
93 prefetcher->regStats(name());
94 }
95
96 template<class TagStore>
97 Port *
98 Cache<TagStore>::getPort(const std::string &if_name, int idx)
99 {
100 if (if_name == "" || if_name == "cpu_side") {
101 return cpuSidePort;
102 } else if (if_name == "mem_side") {
103 return memSidePort;
104 } else {
105 panic("Port name %s unrecognized\n", if_name);
106 }
107 }
108
109 template<class TagStore>
110 void
111 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
112 {
113 uint64_t overwrite_val;
114 bool overwrite_mem;
115 uint64_t condition_val64;
116 uint32_t condition_val32;
117
118 int offset = tags->extractBlkOffset(pkt->getAddr());
119 uint8_t *blk_data = blk->data + offset;
120
121 assert(sizeof(uint64_t) >= pkt->getSize());
122
123 overwrite_mem = true;
124 // keep a copy of our possible write value, and copy what is at the
125 // memory address into the packet
126 pkt->writeData((uint8_t *)&overwrite_val);
127 pkt->setData(blk_data);
128
129 if (pkt->req->isCondSwap()) {
130 if (pkt->getSize() == sizeof(uint64_t)) {
131 condition_val64 = pkt->req->getExtraData();
132 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
133 sizeof(uint64_t));
134 } else if (pkt->getSize() == sizeof(uint32_t)) {
135 condition_val32 = (uint32_t)pkt->req->getExtraData();
136 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
137 sizeof(uint32_t));
138 } else
139 panic("Invalid size for conditional read/write\n");
140 }
141
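// Worked example (conditional 64-bit swap), for illustration: the CPU's new
// value has been saved in overwrite_val and the current block contents have
// been copied into the packet above. If the request's extra data matches
// what the block holds at this offset, overwrite_mem stays true and the new
// value is written below; if not, the block is left untouched. Either way
// the packet carries the old value back, which is how the CPU can tell
// whether the swap actually happened.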
142 if (overwrite_mem) {
143 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
144 blk->status |= BlkDirty;
145 }
146 }
147
148
149 template<class TagStore>
150 void
151 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
152 bool deferred_response,
153 bool pending_downgrade)
154 {
155 assert(blk && blk->isValid());
156 // Occasionally this is not true... if we are a lower-level cache
157 // satisfying a string of Read and ReadEx requests from
158 // upper-level caches, a Read will mark the block as shared but we
159 // can satisfy a following ReadEx anyway since we can rely on the
160 // Read requester(s) to have buffered the ReadEx snoop and to
161 // invalidate their blocks after receiving them.
162 // assert(!pkt->needsExclusive() || blk->isWritable());
163 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
164
165 // Check RMW operations first since both isRead() and
166 // isWrite() will be true for them
167 if (pkt->cmd == MemCmd::SwapReq) {
168 cmpAndSwap(blk, pkt);
169 } else if (pkt->isWrite()) {
170 if (blk->checkWrite(pkt)) {
171 pkt->writeDataToBlock(blk->data, blkSize);
172 blk->status |= BlkDirty;
173 }
174 } else if (pkt->isRead()) {
175 if (pkt->isLLSC()) {
176 blk->trackLoadLocked(pkt);
177 }
178 pkt->setDataFromBlock(blk->data, blkSize);
179 if (pkt->getSize() == blkSize) {
180 // special handling for coherent block requests from
181 // upper-level caches
182 if (pkt->needsExclusive()) {
183 // if we have a dirty copy, make sure the recipient
184 // keeps it marked dirty
185 if (blk->isDirty()) {
186 pkt->assertMemInhibit();
187 }
188 // on ReadExReq we give up our copy unconditionally
189 tags->invalidateBlk(blk);
190 } else if (blk->isWritable() && !pending_downgrade
191 && !pkt->sharedAsserted()) {
192 // we can give the requester an exclusive copy (by not
193 // asserting shared line) on a read request if:
194 // - we have an exclusive copy at this level (& below)
195 // - we don't have a pending snoop from below
196 // signaling another read request
197 // - no other cache above has a copy (otherwise it
198 // would have asserted shared line on request)
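// E.g., in a two-level hierarchy where this cache holds the only copy of
// the block and no snoop is pending, a ReadReq from the cache above can be
// answered without asserting the shared line, so that cache can later write
// the block without a separate upgrade transaction. This is roughly the
// E-state optimization of MESI/MOESI-style protocols.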
199
200 if (blk->isDirty()) {
201 // special considerations if we're owner:
202 if (!deferred_response && !isTopLevel) {
203 // if we are responding immediately and can
204 // signal that we're transferring ownership
205 // along with exclusivity, do so
206 pkt->assertMemInhibit();
207 blk->status &= ~BlkDirty;
208 } else {
209 // if we're responding after our own miss,
210 // there's a window where the recipient didn't
211 // know it was getting ownership and may not
212 // have responded to snoops correctly, so we
213 // can't pass off ownership *or* exclusivity
214 pkt->assertShared();
215 }
216 }
217 } else {
218 // otherwise only respond with a shared copy
219 pkt->assertShared();
220 }
221 }
222 } else {
223 // Not a read or write... must be an upgrade. It's OK
224 // to just ack those as long as we have an exclusive
225 // copy at this level.
226 assert(pkt->isUpgrade());
227 tags->invalidateBlk(blk);
228 }
229 }
230
231
232 /////////////////////////////////////////////////////
233 //
234 // MSHR helper functions
235 //
236 /////////////////////////////////////////////////////
237
238
239 template<class TagStore>
240 void
241 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
242 {
243 markInServiceInternal(mshr, pkt);
244 #if 0
245 if (mshr->originalCmd == MemCmd::HardPFReq) {
246 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
247 name());
248 //Also clear pending if need be
249 if (!prefetcher->havePending())
250 {
251 deassertMemSideBusRequest(Request_PF);
252 }
253 }
254 #endif
255 }
256
257
258 template<class TagStore>
259 void
260 Cache<TagStore>::squash(int threadNum)
261 {
262 bool unblock = false;
263 BlockedCause cause = NUM_BLOCKED_CAUSES;
264
265 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
266 noTargetMSHR = NULL;
267 unblock = true;
268 cause = Blocked_NoTargets;
269 }
270 if (mshrQueue.isFull()) {
271 unblock = true;
272 cause = Blocked_NoMSHRs;
273 }
274 mshrQueue.squash(threadNum);
275 if (unblock && !mshrQueue.isFull()) {
276 clearBlocked(cause);
277 }
278 }
279
280 /////////////////////////////////////////////////////
281 //
282 // Access path: requests coming in from the CPU side
283 //
284 /////////////////////////////////////////////////////
285
286 template<class TagStore>
287 bool
288 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
289 int &lat, PacketList &writebacks)
290 {
291 if (pkt->req->isUncacheable()) {
292 if (pkt->req->isClearLL()) {
293 tags->clearLocks();
294 } else {
295 blk = tags->findBlock(pkt->getAddr());
296 if (blk != NULL) {
297 tags->invalidateBlk(blk);
298 }
299 }
300
301 blk = NULL;
302 lat = hitLatency;
303 return false;
304 }
305
306 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
307 blk = tags->accessBlock(pkt->getAddr(), lat, id);
308
309 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
310 pkt->req->isInstFetch() ? " (ifetch)" : "",
311 pkt->getAddr(), (blk) ? "hit" : "miss");
312
313 if (blk != NULL) {
314
315 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
316 // OK to satisfy access
317 incHitCount(pkt, id);
318 satisfyCpuSideRequest(pkt, blk);
319 return true;
320 }
321 }
322
323 // Can't satisfy access normally... either no block (blk == NULL)
324 // or have block but need exclusive & only have shared.
325
326 // Writeback handling is a special case. We can write the block
327 // into the cache without having a writeable copy (or any copy at
328 // all).
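// This works because a Writeback always carries the full block (asserted
// just below via blkSize == pkt->getSize()), so allocating a victim and
// copying the whole payload is sufficient; whether the resulting copy may
// also be marked writable is signalled separately by isSupplyExclusive()
// on the incoming packet.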
329 if (pkt->cmd == MemCmd::Writeback) {
330 assert(blkSize == pkt->getSize());
331 if (blk == NULL) {
332 // need to do a replacement
333 blk = allocateBlock(pkt->getAddr(), writebacks);
334 if (blk == NULL) {
335 // no replaceable block available, give up.
336 // writeback will be forwarded to next level.
337 incMissCount(pkt, id);
338 return false;
339 }
340 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
341 tags->insertBlock(pkt->getAddr(), blk, id);
342 blk->status = BlkValid | BlkReadable;
343 }
344 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
345 blk->status |= BlkDirty;
346 if (pkt->isSupplyExclusive()) {
347 blk->status |= BlkWritable;
348 }
349 // nothing else to do; writeback doesn't expect response
350 assert(!pkt->needsResponse());
351 incHitCount(pkt, id);
352 return true;
353 }
354
355 incMissCount(pkt, id);
356
357 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
358 // complete miss on store conditional... just give up now
359 pkt->req->setExtraData(0);
360 return true;
361 }
362
363 return false;
364 }
365
366
367 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
368 {
369 Packet::SenderState *prevSenderState;
370 int prevSrc;
371 #ifndef NDEBUG
372 BaseCache *cache;
373 #endif
374 public:
375 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
376 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
377 #ifndef NDEBUG
378 , cache(_cache)
379 #endif
380 {}
381 void restore(Packet *pkt, BaseCache *_cache)
382 {
383 assert(_cache == cache);
384 pkt->senderState = prevSenderState;
385 pkt->setDest(prevSrc);
386 }
387 };
388
389
390 template<class TagStore>
391 bool
392 Cache<TagStore>::timingAccess(PacketPtr pkt)
393 {
394 //@todo Add back in MemDebug Calls
395 // MemDebug::cacheAccess(pkt);
396
397 // we charge hitLatency for doing just about anything here
398 Tick time = curTick() + hitLatency;
399
400 if (pkt->isResponse()) {
401 // must be cache-to-cache response from upper to lower level
402 ForwardResponseRecord *rec =
403 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
404
405 if (rec == NULL) {
406 assert(pkt->cmd == MemCmd::HardPFResp);
407 // Check if it's a prefetch response and handle it. We shouldn't
408 // get any other kinds of responses without FRRs.
409 DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
410 pkt->getAddr());
411 handleResponse(pkt);
412 return true;
413 }
414
415 rec->restore(pkt, this);
416 delete rec;
417 memSidePort->respond(pkt, time);
418 return true;
419 }
420
421 assert(pkt->isRequest());
422
423 if (pkt->memInhibitAsserted()) {
424 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
425 pkt->getAddr());
426 assert(!pkt->req->isUncacheable());
427 // Special tweak for multilevel coherence: snoop downward here
428 // on invalidates since there may be other caches below here
429 // that have shared copies. Not necessary if we know that
430 // supplier had exclusive copy to begin with.
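// E.g., with two L1s sharing this cache's CPU-side bus: if one L1 supplies
// the data for the other L1's ReadExReq, a cache below us may still hold a
// stale shared copy. The express snoop sent out the memory side here
// carries the invalidation downward, with MemInhibit asserted so nothing
// below tries to return data.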
431 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
432 Packet *snoopPkt = new Packet(pkt, true); // clear flags
433 snoopPkt->setExpressSnoop();
434 snoopPkt->assertMemInhibit();
435 memSidePort->sendTiming(snoopPkt);
436 // main memory will delete snoopPkt
437 }
438 // since we're the official target but we aren't responding,
439 // delete the packet now.
440 delete pkt;
441 return true;
442 }
443
444 if (pkt->req->isUncacheable()) {
445 if (pkt->req->isClearLL()) {
446 tags->clearLocks();
447 } else {
448 BlkType *blk = tags->findBlock(pkt->getAddr());
449 if (blk != NULL) {
450 tags->invalidateBlk(blk);
451 }
452 }
453
454 // writes go in write buffer, reads use MSHR
455 if (pkt->isWrite() && !pkt->isRead()) {
456 allocateWriteBuffer(pkt, time, true);
457 } else {
458 allocateUncachedReadBuffer(pkt, time, true);
459 }
460 assert(pkt->needsResponse()); // else we should delete it here??
461 return true;
462 }
463
464 int lat = hitLatency;
465 BlkType *blk = NULL;
466 PacketList writebacks;
467
468 bool satisfied = access(pkt, blk, lat, writebacks);
469
470 #if 0
471 /** @todo make the fast write alloc (wh64) work with coherence. */
472
473 // If this is a block size write/hint (WH64) allocate the block here
474 // if the coherence protocol allows it.
475 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
476 (pkt->cmd == MemCmd::WriteReq
477 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
478 // no outstanding misses, can do this
479 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
480 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
481 if (outstanding_miss) {
482 warn("WriteInv doing a fastallocate"
483 "with an outstanding miss to the same address\n");
484 }
485 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
486 writebacks);
487 ++fastWrites;
488 }
489 }
490 #endif
491
492 // track time of availability of next prefetch, if any
493 Tick next_pf_time = 0;
494
495 bool needsResponse = pkt->needsResponse();
496
497 if (satisfied) {
498 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
499 if (blk)
500 blk->status &= ~BlkHWPrefetched;
501 next_pf_time = prefetcher->notify(pkt, time);
502 }
503
504 if (needsResponse) {
505 pkt->makeTimingResponse();
506 cpuSidePort->respond(pkt, curTick()+lat);
507 } else {
508 delete pkt;
509 }
510 } else {
511 // miss
512
513 Addr blk_addr = blockAlign(pkt->getAddr());
514 MSHR *mshr = mshrQueue.findMatch(blk_addr);
515
516 if (mshr) {
517 // MSHR hit
518 //@todo remove hw_pf here
519 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
520 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
521 mshr->threadNum = -1;
522 }
523 mshr->allocateTarget(pkt, time, order++);
524 if (mshr->getNumTargets() == numTarget) {
525 noTargetMSHR = mshr;
526 setBlocked(Blocked_NoTargets);
527 // need to be careful with this... if this mshr isn't
528 // ready yet (i.e. time > curTick()), we don't want to
529 // move it ahead of mshrs that are ready
530 // mshrQueue.moveToFront(mshr);
531 }
532 } else {
533 // no MSHR
534 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
535 // always mark as cache fill for now... if we implement
536 // no-write-allocate or bypass accesses this will have to
537 // be changed.
538 if (pkt->cmd == MemCmd::Writeback) {
539 allocateWriteBuffer(pkt, time, true);
540 } else {
541 if (blk && blk->isValid()) {
542 // If we have a write miss to a valid block, we
543 // need to mark the block non-readable. Otherwise
544 // if we allow reads while there's an outstanding
545 // write miss, the read could return stale data
546 // out of the cache block... a more aggressive
547 // system could detect the overlap (if any) and
548 // forward data out of the MSHRs, but we don't do
549 // that yet. Note that we do need to leave the
550 // block valid so that it stays in the cache, in
551 // case we get an upgrade response (and hence no
552 // new data) when the write miss completes.
553 // As long as CPUs do proper store/load forwarding
554 // internally, and have a sufficiently weak memory
555 // model, this is probably unnecessary, but at some
556 // point it must have seemed like we needed it...
557 assert(pkt->needsExclusive() && !blk->isWritable());
558 blk->status &= ~BlkReadable;
559 }
560
561 allocateMissBuffer(pkt, time, true);
562 }
563
564 if (prefetcher) {
565 next_pf_time = prefetcher->notify(pkt, time);
566 }
567 }
568 }
569
570 if (next_pf_time != 0)
571 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
572
573 // copy writebacks to write buffer
574 while (!writebacks.empty()) {
575 PacketPtr wbPkt = writebacks.front();
576 allocateWriteBuffer(wbPkt, time, true);
577 writebacks.pop_front();
578 }
579
580 return true;
581 }
582
583
584 // See comment in cache.hh.
585 template<class TagStore>
586 PacketPtr
587 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
588 bool needsExclusive)
589 {
590 bool blkValid = blk && blk->isValid();
591
592 if (cpu_pkt->req->isUncacheable()) {
593 //assert(blk == NULL);
594 return NULL;
595 }
596
597 if (!blkValid &&
598 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
599 // Writebacks that weren't allocated in access() and upgrades
600 // from upper-level caches that missed completely just go
601 // through.
602 return NULL;
603 }
604
605 assert(cpu_pkt->needsResponse());
606
607 MemCmd cmd;
608 // @TODO make useUpgrades a parameter.
609 // Note that ownership protocols require upgrade, otherwise a
610 // write miss on a shared owned block will generate a ReadExcl,
611 // which will clobber the owned copy.
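// Concretely: if this cache holds the block dirty but read-only
// (owned/shared), a ReadExReq would pull a stale copy from memory or a
// lower level and overwrite the local dirty data on fill, whereas
// UpgradeReq only asks for write permission and keeps the data we
// already have.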
612 const bool useUpgrades = true;
613 if (blkValid && useUpgrades) {
614 // only reason to be here is that blk is shared
615 // (read-only) and we need exclusive
616 assert(needsExclusive && !blk->isWritable());
617 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
618 } else {
619 // block is invalid
620 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
621 }
622 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
623
624 pkt->allocate();
625 return pkt;
626 }
627
628
629 template<class TagStore>
630 Tick
631 Cache<TagStore>::atomicAccess(PacketPtr pkt)
632 {
633 int lat = hitLatency;
634
635 // @TODO: make this a parameter
636 bool last_level_cache = false;
637
638 if (pkt->memInhibitAsserted()) {
639 assert(!pkt->req->isUncacheable());
640 // have to invalidate ourselves and any lower caches even if
641 // upper cache will be responding
642 if (pkt->isInvalidate()) {
643 BlkType *blk = tags->findBlock(pkt->getAddr());
644 if (blk && blk->isValid()) {
645 tags->invalidateBlk(blk);
646 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
647 pkt->cmdString(), pkt->getAddr());
648 }
649 if (!last_level_cache) {
650 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
651 pkt->cmdString(), pkt->getAddr());
652 lat += memSidePort->sendAtomic(pkt);
653 }
654 } else {
655 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
656 pkt->cmdString(), pkt->getAddr());
657 }
658
659 return lat;
660 }
661
662 // should assert here that there are no outstanding MSHRs or
663 // writebacks... that would mean that someone used an atomic
664 // access in timing mode
665
666 BlkType *blk = NULL;
667 PacketList writebacks;
668
669 if (!access(pkt, blk, lat, writebacks)) {
670 // MISS
671 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
672
673 bool is_forward = (bus_pkt == NULL);
674
675 if (is_forward) {
676 // just forwarding the same request to the next level
677 // no local cache operation involved
678 bus_pkt = pkt;
679 }
680
681 DPRINTF(Cache, "Sending an atomic %s for %x\n",
682 bus_pkt->cmdString(), bus_pkt->getAddr());
683
684 #if TRACING_ON
685 CacheBlk::State old_state = blk ? blk->status : 0;
686 #endif
687
688 lat += memSidePort->sendAtomic(bus_pkt);
689
690 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
691 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
692
693 assert(!bus_pkt->wasNacked());
694
695 // If packet was a forward, the response (if any) is already
696 // in place in the bus_pkt == pkt structure, so we don't need
697 // to do anything. Otherwise, use the separate bus_pkt to
698 // generate response to pkt and then delete it.
699 if (!is_forward) {
700 if (pkt->needsResponse()) {
701 assert(bus_pkt->isResponse());
702 if (bus_pkt->isError()) {
703 pkt->makeAtomicResponse();
704 pkt->copyError(bus_pkt);
705 } else if (bus_pkt->isRead() ||
706 bus_pkt->cmd == MemCmd::UpgradeResp) {
707 // we're updating cache state to allow us to
708 // satisfy the upstream request from the cache
709 blk = handleFill(bus_pkt, blk, writebacks);
710 satisfyCpuSideRequest(pkt, blk);
711 } else {
712 // we're satisfying the upstream request without
713 // modifying cache state, e.g., a write-through
714 pkt->makeAtomicResponse();
715 }
716 }
717 delete bus_pkt;
718 }
719 }
720
721 // Note that we don't invoke the prefetcher at all in atomic mode.
722 // It's not clear how to do it properly, particularly for
723 // prefetchers that aggressively generate prefetch candidates and
724 // rely on bandwidth contention to throttle them; these will tend
725 // to pollute the cache in atomic mode since there is no bandwidth
726 // contention. If we ever do want to enable prefetching in atomic
727 // mode, though, this is the place to do it... see timingAccess()
728 // for an example (though we'd want to issue the prefetch(es)
729 // immediately rather than calling requestMemSideBus() as we do
730 // there).
731
732 // Handle writebacks if needed
733 while (!writebacks.empty()){
734 PacketPtr wbPkt = writebacks.front();
735 memSidePort->sendAtomic(wbPkt);
736 writebacks.pop_front();
737 delete wbPkt;
738 }
739
740 // We now have the block one way or another (hit or completed miss)
741
742 if (pkt->needsResponse()) {
743 pkt->makeAtomicResponse();
744 }
745
746 return lat;
747 }
748
749
750 template<class TagStore>
751 void
752 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
753 {
754 Addr blk_addr = blockAlign(pkt->getAddr());
755 BlkType *blk = tags->findBlock(pkt->getAddr());
756 MSHR *mshr = mshrQueue.findMatch(blk_addr);
757
758 pkt->pushLabel(name());
759
760 CacheBlkPrintWrapper cbpw(blk);
761
762 // Note that just because an L2/L3 has valid data doesn't mean an
763 // L1 doesn't have a more up-to-date modified copy that still
764 // needs to be found. As a result we always update the request if
765 // we have it, but only declare it satisfied if we are the owner.
766
767 // see if we have data at all (owned or otherwise)
768 bool have_data = blk && blk->isValid()
769 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
770
771 // data we have is dirty if marked as such or if valid & ownership
772 // pending due to outstanding UpgradeReq
773 bool have_dirty =
774 have_data && (blk->isDirty() ||
775 (mshr && mshr->inService && mshr->isPendingDirty()));
776
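// Besides the tags lookup above, the chain below also checks data buffered
// elsewhere in this cache: queued MSHR targets, pending writebacks, and
// packets queued at either port, any of which may hold a newer value than
// the block itself.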
777 bool done = have_dirty
778 || cpuSidePort->checkFunctional(pkt)
779 || mshrQueue.checkFunctional(pkt, blk_addr)
780 || writeBuffer.checkFunctional(pkt, blk_addr)
781 || memSidePort->checkFunctional(pkt);
782
783 DPRINTF(Cache, "functional %s %x %s%s%s\n",
784 pkt->cmdString(), pkt->getAddr(),
785 (blk && blk->isValid()) ? "valid " : "",
786 have_data ? "data " : "", done ? "done " : "");
787
788 // We're leaving the cache, so pop cache->name() label
789 pkt->popLabel();
790
791 if (done) {
792 pkt->makeResponse();
793 } else {
794 // if it came as a request from the CPU side then make sure it
795 // continues towards the memory side
796 if (fromCpuSide) {
797 memSidePort->sendFunctional(pkt);
798 } else if (forwardSnoops) {
799 // if it came from the memory side, it must be a snoop request
800 // and we should only forward it if we are forwarding snoops
801 cpuSidePort->sendFunctional(pkt);
802 }
803 }
804 }
805
806
807 /////////////////////////////////////////////////////
808 //
809 // Response handling: responses from the memory side
810 //
811 /////////////////////////////////////////////////////
812
813
814 template<class TagStore>
815 void
816 Cache<TagStore>::handleResponse(PacketPtr pkt)
817 {
818 Tick time = curTick() + hitLatency;
819 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
820 bool is_error = pkt->isError();
821
822 assert(mshr);
823
824 if (pkt->wasNacked()) {
825 //pkt->reinitFromRequest();
826 warn("NACKs from devices not connected to the same bus "
827 "not implemented\n");
828 return;
829 }
830 if (is_error) {
831 DPRINTF(Cache, "Cache received packet with error for address %x, "
832 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
833 }
834
835 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
836
837 MSHRQueue *mq = mshr->queue;
838 bool wasFull = mq->isFull();
839
840 if (mshr == noTargetMSHR) {
841 // we always clear at least one target
842 clearBlocked(Blocked_NoTargets);
843 noTargetMSHR = NULL;
844 }
845
846 // Initial target is used just for stats
847 MSHR::Target *initial_tgt = mshr->getTarget();
848 BlkType *blk = tags->findBlock(pkt->getAddr());
849 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
850 Tick miss_latency = curTick() - initial_tgt->recvTime;
851 PacketList writebacks;
852
853 if (pkt->req->isUncacheable()) {
854 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
855 miss_latency;
856 } else {
857 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
858 miss_latency;
859 }
860
861 bool is_fill = !mshr->isForward &&
862 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
863
864 if (is_fill && !is_error) {
865 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
866 pkt->getAddr());
867
868 // give mshr a chance to do some dirty work
869 mshr->handleFill(pkt, blk);
870
871 blk = handleFill(pkt, blk, writebacks);
872 assert(blk != NULL);
873 }
874
875 // First offset for critical word first calculations
876 int initial_offset = 0;
877
878 if (mshr->hasTargets()) {
879 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
880 }
881
882 while (mshr->hasTargets()) {
883 MSHR::Target *target = mshr->getTarget();
884
885 switch (target->source) {
886 case MSHR::Target::FromCPU:
887 Tick completion_time;
888 if (is_fill) {
889 satisfyCpuSideRequest(target->pkt, blk,
890 true, mshr->hasPostDowngrade());
891 // How many bytes past the first request is this one
892 int transfer_offset =
893 target->pkt->getOffset(blkSize) - initial_offset;
894 if (transfer_offset < 0) {
895 transfer_offset += blkSize;
896 }
897
898 // If critical word (no offset) return first word time
899 completion_time = tags->getHitLatency() +
900 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
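// E.g., with 64-byte blocks: if the first (initial) target asked for
// offset 16 and this target also wants offset 16, transfer_offset is 0 and
// we charge firstWordTime; if this target wants offset 0, the wrap-around
// gives transfer_offset 48 and we charge finishTime for the whole block
// instead, plus the tag hit latency in both cases.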
901
902 assert(!target->pkt->req->isUncacheable());
903 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
904 completion_time - target->recvTime;
905 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
906 // failed StoreCond upgrade
907 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
908 target->pkt->cmd == MemCmd::StoreCondFailReq ||
909 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
910 completion_time = tags->getHitLatency() + pkt->finishTime;
911 target->pkt->req->setExtraData(0);
912 } else {
913 // not a cache fill, just forwarding response
914 completion_time = tags->getHitLatency() + pkt->finishTime;
915 if (pkt->isRead() && !is_error) {
916 target->pkt->setData(pkt->getPtr<uint8_t>());
917 }
918 }
919 target->pkt->makeTimingResponse();
920 // if this packet is an error copy that to the new packet
921 if (is_error)
922 target->pkt->copyError(pkt);
923 if (target->pkt->cmd == MemCmd::ReadResp &&
924 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
925 // If intermediate cache got ReadRespWithInvalidate,
926 // propagate that. Response should not have
927 // isInvalidate() set otherwise.
928 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
929 }
930 cpuSidePort->respond(target->pkt, completion_time);
931 break;
932
933 case MSHR::Target::FromPrefetcher:
934 assert(target->pkt->cmd == MemCmd::HardPFReq);
935 if (blk)
936 blk->status |= BlkHWPrefetched;
937 delete target->pkt->req;
938 delete target->pkt;
939 break;
940
941 case MSHR::Target::FromSnoop:
942 // I don't believe that a snoop can be in an error state
943 assert(!is_error);
944 // response to snoop request
945 DPRINTF(Cache, "processing deferred snoop...\n");
946 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
947 handleSnoop(target->pkt, blk, true, true,
948 mshr->hasPostInvalidate());
949 break;
950
951 default:
952 panic("Illegal target->source enum %d\n", target->source);
953 }
954
955 mshr->popTarget();
956 }
957
958 if (blk) {
959 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
960 tags->invalidateBlk(blk);
961 } else if (mshr->hasPostDowngrade()) {
962 blk->status &= ~BlkWritable;
963 }
964 }
965
966 if (mshr->promoteDeferredTargets()) {
967 // avoid later read getting stale data while write miss is
968 // outstanding.. see comment in timingAccess()
969 if (blk) {
970 blk->status &= ~BlkReadable;
971 }
972 MSHRQueue *mq = mshr->queue;
973 mq->markPending(mshr);
974 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
975 } else {
976 mq->deallocate(mshr);
977 if (wasFull && !mq->isFull()) {
978 clearBlocked((BlockedCause)mq->index);
979 }
980 }
981
982 // copy writebacks to write buffer
983 while (!writebacks.empty()) {
984 PacketPtr wbPkt = writebacks.front();
985 allocateWriteBuffer(wbPkt, time, true);
986 writebacks.pop_front();
987 }
988 // if we used temp block, clear it out
989 if (blk == tempBlock) {
990 if (blk->isDirty()) {
991 allocateWriteBuffer(writebackBlk(blk), time, true);
992 }
993 tags->invalidateBlk(blk);
994 }
995
996 delete pkt;
997 }
998
999
1000
1001
1002 template<class TagStore>
1003 PacketPtr
1004 Cache<TagStore>::writebackBlk(BlkType *blk)
1005 {
1006 assert(blk && blk->isValid() && blk->isDirty());
1007
1008 writebacks[0/*pkt->req->threadId()*/]++;
1009
1010 Request *writebackReq =
1011 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
1012 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
1013 if (blk->isWritable()) {
1014 writeback->setSupplyExclusive();
1015 }
1016 writeback->allocate();
1017 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1018
1019 blk->status &= ~BlkDirty;
1020 return writeback;
1021 }
1022
1023
1024 template<class TagStore>
1025 typename Cache<TagStore>::BlkType*
1026 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1027 {
1028 BlkType *blk = tags->findVictim(addr, writebacks);
1029
1030 if (blk->isValid()) {
1031 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1032 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1033 if (repl_mshr) {
1034 // must be an outstanding upgrade request on block
1035 // we're about to replace...
1036 assert(!blk->isWritable());
1037 assert(repl_mshr->needsExclusive());
1038 // too hard to replace block with transient state
1039 // allocation failed, block not inserted
1040 return NULL;
1041 } else {
1042 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1043 repl_addr, addr,
1044 blk->isDirty() ? "writeback" : "clean");
1045
1046 if (blk->isDirty()) {
1047 // Save writeback packet for handling by caller
1048 writebacks.push_back(writebackBlk(blk));
1049 }
1050 }
1051 }
1052
1053 return blk;
1054 }
1055
1056
1057 // Note that the reason we return a list of writebacks rather than
1058 // inserting them directly in the write buffer is that this function
1059 // is called by both atomic and timing-mode accesses, and in atomic
1060 // mode we don't mess with the write buffer (we just perform the
1061 // writebacks atomically once the original request is complete).
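// For example, atomicAccess() drains the returned list by calling
// sendAtomic() on each writeback packet directly, while timingAccess() and
// handleResponse() push the entries into the write buffer via
// allocateWriteBuffer().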
1062 template<class TagStore>
1063 typename Cache<TagStore>::BlkType*
1064 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1065 PacketList &writebacks)
1066 {
1067 Addr addr = pkt->getAddr();
1068 #if TRACING_ON
1069 CacheBlk::State old_state = blk ? blk->status : 0;
1070 #endif
1071
1072 if (blk == NULL) {
1073 // better have read new data...
1074 assert(pkt->hasData());
1075 // need to do a replacement
1076 blk = allocateBlock(addr, writebacks);
1077 if (blk == NULL) {
1078 // No replaceable block... just use temporary storage to
1079 // complete the current request and then get rid of it
1080 assert(!tempBlock->isValid());
1081 blk = tempBlock;
1082 tempBlock->set = tags->extractSet(addr);
1083 tempBlock->tag = tags->extractTag(addr);
1084 DPRINTF(Cache, "using temp block for %x\n", addr);
1085 } else {
1086 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
1087 tags->insertBlock(pkt->getAddr(), blk, id);
1088 }
1089
1090 // starting from scratch with a new block
1091 blk->status = 0;
1092 } else {
1093 // existing block... probably an upgrade
1094 assert(blk->tag == tags->extractTag(addr));
1095 // either we're getting new data or the block should already be valid
1096 assert(pkt->hasData() || blk->isValid());
1097 // don't clear block status... if block is already dirty we
1098 // don't want to lose that
1099 }
1100
1101 blk->status |= BlkValid | BlkReadable;
1102
1103 if (!pkt->sharedAsserted()) {
1104 blk->status |= BlkWritable;
1105 // If we got this via cache-to-cache transfer (i.e., from a
1106 // cache that was an owner) and took away that owner's copy,
1107 // then we need to write it back. Normally this happens
1108 // anyway as a side effect of getting a copy to write it, but
1109 // there are cases (such as failed store conditionals or
1110 // compare-and-swaps) where we'll demand an exclusive copy but
1111 // end up not writing it.
1112 if (pkt->memInhibitAsserted())
1113 blk->status |= BlkDirty;
1114 }
1115
1116 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1117 addr, old_state, blk->status);
1118
1119 // if we got new data, copy it in
1120 if (pkt->isRead()) {
1121 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1122 }
1123
1124 blk->whenReady = pkt->finishTime;
1125
1126 return blk;
1127 }
1128
1129
1130 /////////////////////////////////////////////////////
1131 //
1132 // Snoop path: requests coming in from the memory side
1133 //
1134 /////////////////////////////////////////////////////
1135
1136 template<class TagStore>
1137 void
1138 Cache<TagStore>::
1139 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1140 bool already_copied, bool pending_inval)
1141 {
1142 // timing-mode snoop responses require a new packet, unless we
1143 // already made a copy...
1144 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1145 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1146 pkt->allocate();
1147 pkt->makeTimingResponse();
1148 if (pkt->isRead()) {
1149 pkt->setDataFromBlock(blk_data, blkSize);
1150 }
1151 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1152 // Assume we defer a response to a read from a far-away cache
1153 // A, then later defer a ReadExcl from a cache B on the same
1154 // bus as us. We'll assert MemInhibit in both cases, but in
1155 // the latter case MemInhibit will keep the invalidation from
1156 // reaching cache A. This special response tells cache A that
1157 // it gets the block to satisfy its read, but must immediately
1158 // invalidate it.
1159 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1160 }
1161 memSidePort->respond(pkt, curTick() + hitLatency);
1162 }
1163
1164 template<class TagStore>
1165 void
1166 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1167 bool is_timing, bool is_deferred,
1168 bool pending_inval)
1169 {
1170 // deferred snoops can only happen in timing mode
1171 assert(!(is_deferred && !is_timing));
1172 // pending_inval only makes sense on deferred snoops
1173 assert(!(pending_inval && !is_deferred));
1174 assert(pkt->isRequest());
1175
1176 // the packet may get modified if we or a forwarded snooper
1177 // responds in atomic mode, so remember a few things about the
1178 // original packet up front
1179 bool invalidate = pkt->isInvalidate();
1180 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1181
1182 if (forwardSnoops) {
1183 // first propagate snoop upward to see if anyone above us wants to
1184 // handle it. save & restore packet src since it will get
1185 // rewritten to be relative to cpu-side bus (if any)
1186 bool alreadyResponded = pkt->memInhibitAsserted();
1187 if (is_timing) {
1188 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1189 snoopPkt->setExpressSnoop();
1190 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1191 cpuSidePort->sendTiming(snoopPkt);
1192 if (snoopPkt->memInhibitAsserted()) {
1193 // cache-to-cache response from some upper cache
1194 assert(!alreadyResponded);
1195 pkt->assertMemInhibit();
1196 } else {
1197 delete snoopPkt->senderState;
1198 }
1199 if (snoopPkt->sharedAsserted()) {
1200 pkt->assertShared();
1201 }
1202 delete snoopPkt;
1203 } else {
1204 int origSrc = pkt->getSrc();
1205 cpuSidePort->sendAtomic(pkt);
1206 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1207 // cache-to-cache response from some upper cache:
1208 // forward response to original requester
1209 assert(pkt->isResponse());
1210 }
1211 pkt->setSrc(origSrc);
1212 }
1213 }
1214
1215 if (!blk || !blk->isValid()) {
1216 return;
1217 }
1218
1219 // we may end up modifying both the block state and the packet (if
1220 // we respond in atomic mode), so just figure out what to do now
1221 // and then do it later
1222 bool respond = blk->isDirty() && pkt->needsResponse();
1223 bool have_exclusive = blk->isWritable();
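// Note that only a dirty copy needs to supply data on a snoop: if our copy
// is clean, memory (or a cache below us) holds the same data and will
// provide the response, so here we limit ourselves to adjusting the block
// state and, for reads, asserting the shared line below.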
1224
1225 if (pkt->isRead() && !invalidate) {
1226 assert(!needs_exclusive);
1227 pkt->assertShared();
1228 int bits_to_clear = BlkWritable;
1229 const bool haveOwnershipState = true; // for now
1230 if (!haveOwnershipState) {
1231 // if we don't support pure ownership (dirty && !writable),
1232 // have to clear dirty bit here, assume memory snarfs data
1233 // on cache-to-cache xfer
1234 bits_to_clear |= BlkDirty;
1235 }
1236 blk->status &= ~bits_to_clear;
1237 }
1238
1239 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1240 pkt->cmdString(), blockAlign(pkt->getAddr()),
1241 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1242
1243 if (respond) {
1244 assert(!pkt->memInhibitAsserted());
1245 pkt->assertMemInhibit();
1246 if (have_exclusive) {
1247 pkt->setSupplyExclusive();
1248 }
1249 if (is_timing) {
1250 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1251 } else {
1252 pkt->makeAtomicResponse();
1253 pkt->setDataFromBlock(blk->data, blkSize);
1254 }
1255 } else if (is_timing && is_deferred) {
1256 // if it's a deferred timing snoop then we've made a copy of
1257 // the packet, and so if we're not using that copy to respond
1258 // then we need to delete it here.
1259 delete pkt;
1260 }
1261
1262 // Do this last in case it deallocates block data or something
1263 // like that
1264 if (invalidate) {
1265 tags->invalidateBlk(blk);
1266 }
1267 }
1268
1269
1270 template<class TagStore>
1271 void
1272 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1273 {
1274 // Note that some deferred snoops don't have requests, since the
1275 // original access may have already completed
1276 if ((pkt->req && pkt->req->isUncacheable()) ||
1277 pkt->cmd == MemCmd::Writeback) {
1278 // Can't get a hit on an uncacheable address
1279 // Revisit this for multi level coherence
1280 return;
1281 }
1282
1283 BlkType *blk = tags->findBlock(pkt->getAddr());
1284
1285 Addr blk_addr = blockAlign(pkt->getAddr());
1286 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1287
1288 // Let the MSHR itself track the snoop and decide whether we want
1289 // to go ahead and do the regular cache snoop
1290 if (mshr && mshr->handleSnoop(pkt, order++)) {
1291 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1292 blk_addr);
1293 if (mshr->getNumTargets() > numTarget)
1294 warn("allocating bonus target for snoop"); //handle later
1295 return;
1296 }
1297
1298 // We also need to check the writeback buffers and handle those
1299 std::vector<MSHR *> writebacks;
1300 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1301 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1302 pkt->getAddr());
1303
1304 // Look through the writebacks for any cacheable writes and use the first match
1305 for (int i = 0; i < writebacks.size(); i++) {
1306 mshr = writebacks[i];
1307 assert(!mshr->isUncacheable());
1308 assert(mshr->getNumTargets() == 1);
1309 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1310 assert(wb_pkt->cmd == MemCmd::Writeback);
1311
1312 assert(!pkt->memInhibitAsserted());
1313 pkt->assertMemInhibit();
1314 if (!pkt->needsExclusive()) {
1315 pkt->assertShared();
1316 // the writeback is no longer the exclusive copy in the system
1317 wb_pkt->clearSupplyExclusive();
1318 } else {
1319 // if we're not asserting the shared line, we need to
1320 // invalidate our copy. we'll do that below as long as
1321 // the packet's invalidate flag is set...
1322 assert(pkt->isInvalidate());
1323 }
1324 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1325 false, false);
1326
1327 if (pkt->isInvalidate()) {
1328 // Invalidation trumps our writeback... discard here
1329 markInService(mshr);
1330 delete wb_pkt;
1331 }
1332
1333 // If this was a shared writeback, there may still be
1334 // other shared copies above that require invalidation.
1335 // We could be more selective and return here if the
1336 // request is non-exclusive or if the writeback is
1337 // exclusive.
1338 break;
1339 }
1340 }
1341
1342 handleSnoop(pkt, blk, true, false, false);
1343 }
1344
1345
1346 template<class TagStore>
1347 Tick
1348 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1349 {
1350 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1351 // Can't get a hit on an uncacheable address
1352 // Revisit this for multi level coherence
1353 return hitLatency;
1354 }
1355
1356 BlkType *blk = tags->findBlock(pkt->getAddr());
1357 handleSnoop(pkt, blk, false, false, false);
1358 return hitLatency;
1359 }
1360
1361
1362 template<class TagStore>
1363 MSHR *
1364 Cache<TagStore>::getNextMSHR()
1365 {
1366 // Check both MSHR queue and write buffer for potential requests
1367 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1368 MSHR *write_mshr = writeBuffer.getNextMSHR();
1369
1370 // Now figure out which one to send... some cases are easy
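// Rough priority order implemented below: (1) if only one queue has a
// ready entry, use it; (2) with both ready, favor the miss unless the
// write buffer is full and has nothing in service; (3) in either case,
// defer to a conflicting older entry in the other queue for the same
// address; (4) with nothing pending, fall through to the prefetcher.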
1371 if (miss_mshr && !write_mshr) {
1372 return miss_mshr;
1373 }
1374 if (write_mshr && !miss_mshr) {
1375 return write_mshr;
1376 }
1377
1378 if (miss_mshr && write_mshr) {
1379 // We have one of each... normally we favor the miss request
1380 // unless the write buffer is full
1381 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1382 // Write buffer is full, so we'd like to issue a write;
1383 // need to search MSHR queue for conflicting earlier miss.
1384 MSHR *conflict_mshr =
1385 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1386
1387 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1388 // Service misses in order until conflict is cleared.
1389 return conflict_mshr;
1390 }
1391
1392 // No conflicts; issue write
1393 return write_mshr;
1394 }
1395
1396 // Write buffer isn't full, but need to check it for
1397 // conflicting earlier writeback
1398 MSHR *conflict_mshr =
1399 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1400 if (conflict_mshr) {
1401 // not sure why we don't check order here... it was in the
1402 // original code but commented out.
1403
1404 // The only way this happens is if we are
1405 // doing a write and we didn't have permissions
1406 // then subsequently saw a writeback (owned got evicted)
1407 // We need to make sure to perform the writeback first
1408 // To preserve the dirty data, then we can issue the write
1409
1410 // should we return write_mshr here instead? I.e. do we
1411 // have to flush writes in order? I don't think so... not
1412 // for Alpha anyway. Maybe for x86?
1413 return conflict_mshr;
1414 }
1415
1416 // No conflicts; issue read
1417 return miss_mshr;
1418 }
1419
1420 // fall through... no pending requests. Try a prefetch.
1421 assert(!miss_mshr && !write_mshr);
1422 if (prefetcher && !mshrQueue.isFull()) {
1423 // If we have a miss queue slot, we can try a prefetch
1424 PacketPtr pkt = prefetcher->getPacket();
1425 if (pkt) {
1426 Addr pf_addr = blockAlign(pkt->getAddr());
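// Drop the prefetch if the block is already present or if there is
// already an outstanding miss or writeback for the same block; issuing
// it would only waste an MSHR and bus bandwidth.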
1427 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1428 !writeBuffer.findMatch(pf_addr)) {
1429 // Update statistic on number of prefetches issued
1430 // (hwpf_mshr_misses)
1431 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1432 // Don't request bus, since we already have it
1433 return allocateMissBuffer(pkt, curTick(), false);
1434 } else {
1435 // free the request and packet
1436 delete pkt->req;
1437 delete pkt;
1438 }
1439 }
1440 }
1441
1442 return NULL;
1443 }
1444
1445
1446 template<class TagStore>
1447 PacketPtr
1448 Cache<TagStore>::getTimingPacket()
1449 {
1450 MSHR *mshr = getNextMSHR();
1451
1452 if (mshr == NULL) {
1453 return NULL;
1454 }
1455
1456 // use request from 1st target
1457 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1458 PacketPtr pkt = NULL;
1459
1460 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1461 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1462 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1463 // in MSHR, so now that we are getting around to processing
1464 // it, just treat it as if we got a failure response
1465 pkt = new Packet(tgt_pkt);
1466 pkt->cmd = MemCmd::UpgradeFailResp;
1467 pkt->senderState = mshr;
1468 pkt->firstWordTime = pkt->finishTime = curTick();
1469 handleResponse(pkt);
1470 return NULL;
1471 } else if (mshr->isForwardNoResponse()) {
1472 // no response expected, just forward packet as it is
1473 assert(tags->findBlock(mshr->addr) == NULL);
1474 pkt = tgt_pkt;
1475 } else {
1476 BlkType *blk = tags->findBlock(mshr->addr);
1477
1478 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1479 // It might be possible for a writeback to arrive between
1480 // the time the prefetch is placed in the MSHRs and when
1481 // it's selected to send... if so, this assert will catch
1482 // that, and then we'll have to figure out what to do.
1483 assert(blk == NULL);
1484
1485 // We need to check the caches above us to verify that they don't have
1486 // a copy of this block in the dirty state at the moment. Without this
1487 // check we could get a stale copy from memory that might get used
1488 // in place of the dirty one.
1489 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1490 snoop_pkt->setExpressSnoop();
1491 snoop_pkt->senderState = mshr;
1492 cpuSidePort->sendTiming(snoop_pkt);
1493
1494 if (snoop_pkt->memInhibitAsserted()) {
1495 markInService(mshr, snoop_pkt);
1496 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1497 tgt_pkt->getAddr());
1498 delete snoop_pkt;
1499 return NULL;
1500 }
1501 delete snoop_pkt;
1502 }
1503
1504 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1505
1506 mshr->isForward = (pkt == NULL);
1507
1508 if (mshr->isForward) {
1509 // not a cache block request, but a response is expected
1510 // make copy of current packet to forward, keep current
1511 // copy for response handling
1512 pkt = new Packet(tgt_pkt);
1513 pkt->allocate();
1514 if (pkt->isWrite()) {
1515 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1516 }
1517 }
1518 }
1519
1520 assert(pkt != NULL);
1521 pkt->senderState = mshr;
1522 return pkt;
1523 }
1524
1525
1526 template<class TagStore>
1527 Tick
1528 Cache<TagStore>::nextMSHRReadyTime()
1529 {
1530 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1531 writeBuffer.nextMSHRReadyTime());
1532
1533 if (prefetcher) {
1534 nextReady = std::min(nextReady,
1535 prefetcher->nextPrefetchReadyTime());
1536 }
1537
1538 return nextReady;
1539 }
1540
1541
1542 ///////////////
1543 //
1544 // CpuSidePort
1545 //
1546 ///////////////
1547
1548 template<class TagStore>
1549 AddrRangeList
1550 Cache<TagStore>::CpuSidePort::
1551 getAddrRanges()
1552 {
1553 // CPU side port doesn't snoop; it's a target only. It can
1554 // potentially respond to any address.
1555 AddrRangeList ranges;
1556 ranges.push_back(myCache()->getAddrRange());
1557 return ranges;
1558 }
1559
1560
1561 template<class TagStore>
1562 bool
1563 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1564 {
1565 // illegal to block responses... can lead to deadlock
1566 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1567 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1568 mustSendRetry = true;
1569 return false;
1570 }
1571
1572 myCache()->timingAccess(pkt);
1573 return true;
1574 }
1575
1576
1577 template<class TagStore>
1578 Tick
1579 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1580 {
1581 return myCache()->atomicAccess(pkt);
1582 }
1583
1584
1585 template<class TagStore>
1586 void
1587 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1588 {
1589 myCache()->functionalAccess(pkt, true);
1590 }
1591
1592
1593 template<class TagStore>
1594 Cache<TagStore>::
1595 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1596 const std::string &_label)
1597 : BaseCache::CachePort(_name, _cache, _label)
1598 {
1599 }
1600
1601 ///////////////
1602 //
1603 // MemSidePort
1604 //
1605 ///////////////
1606
1607 template<class TagStore>
1608 bool
1609 Cache<TagStore>::MemSidePort::isSnooping()
1610 {
1611 // Memory-side port always snoops, but never passes requests
1612 // through to targets on the cpu side (so we don't add anything to
1613 // the address range list).
1614 return true;
1615 }
1616
1617
1618 template<class TagStore>
1619 bool
1620 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1621 {
1622 // This needs to be fixed so that the cache updates the MSHR and sends the
1623 // packet back out on the link, but that probably won't happen, so until
1624 // it gets fixed just panic when a nacked packet shows up
1625 if (pkt->wasNacked())
1626 panic("Need to implement cache resending nacked packets!\n");
1627
1628 if (pkt->isRequest() && blocked) {
1629 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1630 mustSendRetry = true;
1631 return false;
1632 }
1633
1634 if (pkt->isResponse()) {
1635 myCache()->handleResponse(pkt);
1636 } else {
1637 myCache()->snoopTiming(pkt);
1638 }
1639 return true;
1640 }
1641
1642
1643 template<class TagStore>
1644 Tick
1645 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1646 {
1647 // in atomic mode, responses go back to the sender via the
1648 // function return from sendAtomic(), not via a separate
1649 // sendAtomic() from the responder. Thus we should never see a
1650 // response packet in recvAtomic() (anywhere, not just here).
1651 assert(!pkt->isResponse());
1652 return myCache()->snoopAtomic(pkt);
1653 }
1654
1655
1656 template<class TagStore>
1657 void
1658 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1659 {
1660 myCache()->functionalAccess(pkt, false);
1661 }
1662
1663
1664
1665 template<class TagStore>
1666 void
1667 Cache<TagStore>::MemSidePort::sendPacket()
1668 {
1669 // if we have responses that are ready, they take precedence
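// Two kinds of traffic leave through this port: already-queued (deferred)
// packets such as snoop responses, which are drained first, and new
// requests or writebacks pulled from the cache via getTimingPacket(); in
// both cases waitingOnRetry records whether the bus accepted the packet.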
1670 if (deferredPacketReady()) {
1671 bool success = sendTiming(transmitList.front().pkt);
1672
1673 if (success) {
1674 //send successful, remove packet
1675 transmitList.pop_front();
1676 }
1677
1678 waitingOnRetry = !success;
1679 } else {
1680 // check for non-response packets (requests & writebacks)
1681 PacketPtr pkt = myCache()->getTimingPacket();
1682 if (pkt == NULL) {
1683 // can happen if e.g. we attempt a writeback and fail, but
1684 // before the retry, the writeback is eliminated because
1685 // we snoop another cache's ReadEx.
1686 waitingOnRetry = false;
1687 } else {
1688 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1689
1690 bool success = sendTiming(pkt);
1691
1692 waitingOnRetry = !success;
1693 if (waitingOnRetry) {
1694 DPRINTF(CachePort, "now waiting on a retry\n");
1695 if (!mshr->isForwardNoResponse()) {
1696 delete pkt;
1697 }
1698 } else {
1699 myCache()->markInService(mshr, pkt);
1700 }
1701 }
1702 }
1703
1704
1705 // tried to send packet... if it was successful (no retry), see if
1706 // we need to rerequest bus or not
1707 if (!waitingOnRetry) {
1708 Tick nextReady = std::min(deferredPacketReadyTime(),
1709 myCache()->nextMSHRReadyTime());
1710 // @TODO: need to factor in prefetch requests here somehow
1711 if (nextReady != MaxTick) {
1712 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1713 cache->schedule(sendEvent, std::max(nextReady, curTick() + 1));
1714 } else {
1715 // no more to send right now: if we're draining, we may be done
1716 if (drainEvent && !sendEvent->scheduled()) {
1717 drainEvent->process();
1718 drainEvent = NULL;
1719 }
1720 }
1721 }
1722 }
1723
1724 template<class TagStore>
1725 void
1726 Cache<TagStore>::MemSidePort::recvRetry()
1727 {
1728 assert(waitingOnRetry);
1729 sendPacket();
1730 }
1731
1732
1733 template<class TagStore>
1734 void
1735 Cache<TagStore>::MemSidePort::processSendEvent()
1736 {
1737 assert(!waitingOnRetry);
1738 sendPacket();
1739 }
1740
1741
1742 template<class TagStore>
1743 Cache<TagStore>::
1744 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1745 const std::string &_label)
1746 : BaseCache::CachePort(_name, _cache, _label)
1747 {
1748 // override default send event from SimpleTimingPort
1749 delete sendEvent;
1750 sendEvent = new SendEvent(this);
1751 }