Mem: Fix issue with dirty block being lost when entire block transferred to non-cache.
[gem5.git] / src / mem / cache / cache_impl.hh
1 /*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "mem/cache/blk.hh"
58 #include "mem/cache/cache.hh"
59 #include "mem/cache/mshr.hh"
60 #include "mem/cache/prefetch/base.hh"
61 #include "sim/sim_exit.hh"
62
63 template<class TagStore>
64 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
65 : BaseCache(p),
66 tags(tags),
67 prefetcher(pf),
68 doFastWrites(true),
69 prefetchOnAccess(p->prefetch_on_access)
70 {
71 tempBlock = new BlkType();
72 tempBlock->data = new uint8_t[blkSize];
73
74 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
75 "CpuSidePort");
76 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
77 "MemSidePort");
78 cpuSidePort->setOtherPort(memSidePort);
79 memSidePort->setOtherPort(cpuSidePort);
80
81 tags->setCache(this);
82 if (prefetcher)
83 prefetcher->setCache(this);
84 }
85
86 template<class TagStore>
87 void
88 Cache<TagStore>::regStats()
89 {
90 BaseCache::regStats();
91 tags->regStats(name());
92 if (prefetcher)
93 prefetcher->regStats(name());
94 }
95
96 template<class TagStore>
97 Port *
98 Cache<TagStore>::getPort(const std::string &if_name, int idx)
99 {
100 if (if_name == "" || if_name == "cpu_side") {
101 return cpuSidePort;
102 } else if (if_name == "mem_side") {
103 return memSidePort;
104 } else if (if_name == "functional") {
105 CpuSidePort *funcPort =
106 new CpuSidePort(name() + "-cpu_side_funcport", this,
107 "CpuSideFuncPort");
108 funcPort->setOtherPort(memSidePort);
109 return funcPort;
110 } else {
111 panic("Port name %s unrecognized\n", if_name);
112 }
113 }
114
115 template<class TagStore>
116 void
117 Cache<TagStore>::deletePortRefs(Port *p)
118 {
119 if (cpuSidePort == p || memSidePort == p)
120 panic("Can only delete functional ports\n");
121
122 delete p;
123 }
124
125
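/**
 * Handle a SwapReq (atomic swap / compare-and-swap) entirely within
 * the block: the current block contents are returned in the packet,
 * and the packet's write value replaces them (marking the block
 * dirty) unless a conditional swap's comparison value fails to match.
 */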
126 template<class TagStore>
127 void
128 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
129 {
130 uint64_t overwrite_val;
131 bool overwrite_mem;
132 uint64_t condition_val64;
133 uint32_t condition_val32;
134
135 int offset = tags->extractBlkOffset(pkt->getAddr());
136 uint8_t *blk_data = blk->data + offset;
137
138 assert(sizeof(uint64_t) >= pkt->getSize());
139
140 overwrite_mem = true;
141 // keep a copy of our possible write value, and copy what is at the
142 // memory address into the packet
143 pkt->writeData((uint8_t *)&overwrite_val);
144 pkt->setData(blk_data);
145
146 if (pkt->req->isCondSwap()) {
147 if (pkt->getSize() == sizeof(uint64_t)) {
148 condition_val64 = pkt->req->getExtraData();
149 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
150 sizeof(uint64_t));
151 } else if (pkt->getSize() == sizeof(uint32_t)) {
152 condition_val32 = (uint32_t)pkt->req->getExtraData();
153 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
154 sizeof(uint32_t));
155 } else
156 panic("Invalid size for conditional read/write\n");
157 }
158
159 if (overwrite_mem) {
160 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
161 blk->status |= BlkDirty;
162 }
163 }
164
165
166 template<class TagStore>
167 void
168 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
169 bool deferred_response,
170 bool pending_downgrade)
171 {
172 assert(blk && blk->isValid());
173 // Occasionally this is not true... if we are a lower-level cache
174 // satisfying a string of Read and ReadEx requests from
175 // upper-level caches, a Read will mark the block as shared but we
176 // can satisfy a following ReadEx anyway since we can rely on the
177 // Read requester(s) to have buffered the ReadEx snoop and to
178 // invalidate their blocks after receiving them.
179 // assert(!pkt->needsExclusive() || blk->isWritable());
180 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
181
182 // Check RMW operations first since both isRead() and
183 // isWrite() will be true for them
184 if (pkt->cmd == MemCmd::SwapReq) {
185 cmpAndSwap(blk, pkt);
186 } else if (pkt->isWrite()) {
187 if (blk->checkWrite(pkt)) {
188 pkt->writeDataToBlock(blk->data, blkSize);
189 blk->status |= BlkDirty;
190 }
191 } else if (pkt->isRead()) {
192 if (pkt->isLLSC()) {
193 blk->trackLoadLocked(pkt);
194 }
195 pkt->setDataFromBlock(blk->data, blkSize);
196 if (pkt->getSize() == blkSize) {
197 // special handling for coherent block requests from
198 // upper-level caches
199 if (pkt->needsExclusive()) {
200 // if we have a dirty copy, make sure the recipient
201 // keeps it marked dirty
202 if (blk->isDirty()) {
203 pkt->assertMemInhibit();
204 }
205 // on ReadExReq we give up our copy unconditionally
206 tags->invalidateBlk(blk);
207 } else if (blk->isWritable() && !pending_downgrade
208 && !pkt->sharedAsserted()) {
209 // we can give the requester an exclusive copy (by not
210 // asserting shared line) on a read request if:
211 // - we have an exclusive copy at this level (& below)
212 // - we don't have a pending snoop from below
213 // signaling another read request
214 // - no other cache above has a copy (otherwise it
215 // would have asserted shared line on request)
216
217 if (blk->isDirty()) {
218 // special considerations if we're owner:
219 if (!deferred_response && !isTopLevel) {
220 // if we are responding immediately and can
221 // signal that we're transferring ownership
222 // along with exclusivity, do so
223 pkt->assertMemInhibit();
224 blk->status &= ~BlkDirty;
225 } else {
226 // if we're responding after our own miss,
227 // there's a window where the recipient didn't
228 // know it was getting ownership and may not
229 // have responded to snoops correctly, so we
230 // can't pass off ownership *or* exclusivity
231 pkt->assertShared();
232 }
233 }
234 } else {
235 // otherwise only respond with a shared copy
236 pkt->assertShared();
237 }
238 }
239 } else {
240 // Not a read or write... must be an upgrade. It's OK
241 // to just ack those as long as we have an exclusive
242 // copy at this level.
243 assert(pkt->isUpgrade());
244 tags->invalidateBlk(blk);
245 }
246 }
247
248
249 /////////////////////////////////////////////////////
250 //
251 // MSHR helper functions
252 //
253 /////////////////////////////////////////////////////
254
255
256 template<class TagStore>
257 void
258 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
259 {
260 markInServiceInternal(mshr, pkt);
261 #if 0
262 if (mshr->originalCmd == MemCmd::HardPFReq) {
263 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
264 name());
265 //Also clear pending if need be
266 if (!prefetcher->havePending())
267 {
268 deassertMemSideBusRequest(Request_PF);
269 }
270 }
271 #endif
272 }
273
274
275 template<class TagStore>
276 void
277 Cache<TagStore>::squash(int threadNum)
278 {
279 bool unblock = false;
280 BlockedCause cause = NUM_BLOCKED_CAUSES;
281
282 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
283 noTargetMSHR = NULL;
284 unblock = true;
285 cause = Blocked_NoTargets;
286 }
287 if (mshrQueue.isFull()) {
288 unblock = true;
289 cause = Blocked_NoMSHRs;
290 }
291 mshrQueue.squash(threadNum);
292 if (unblock && !mshrQueue.isFull()) {
293 clearBlocked(cause);
294 }
295 }
296
297 /////////////////////////////////////////////////////
298 //
299 // Access path: requests coming in from the CPU side
300 //
301 /////////////////////////////////////////////////////
302
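/**
 * Look up the block for pkt and try to satisfy the access in place:
 * uncacheable requests invalidate any matching block and are not
 * satisfied here, and Writebacks can be accepted even without an
 * existing copy by allocating a victim. Returns true if the access
 * was satisfied, with blk and lat filled in and any victim writeback
 * packets appended to writebacks.
 */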
303 template<class TagStore>
304 bool
305 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
306 int &lat, PacketList &writebacks)
307 {
308 if (pkt->req->isUncacheable()) {
309 if (pkt->req->isClearLL()) {
310 tags->clearLocks();
311 } else {
312 blk = tags->findBlock(pkt->getAddr());
313 if (blk != NULL) {
314 tags->invalidateBlk(blk);
315 }
316 }
317
318 blk = NULL;
319 lat = hitLatency;
320 return false;
321 }
322
323 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
324 blk = tags->accessBlock(pkt->getAddr(), lat, id);
325
326 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
327 pkt->req->isInstFetch() ? " (ifetch)" : "",
328 pkt->getAddr(), (blk) ? "hit" : "miss");
329
330 if (blk != NULL) {
331
332 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
333 // OK to satisfy access
334 incHitCount(pkt, id);
335 satisfyCpuSideRequest(pkt, blk);
336 return true;
337 }
338 }
339
340 // Can't satisfy access normally... either no block (blk == NULL)
341 // or have block but need exclusive & only have shared.
342
343 // Writeback handling is special case. We can write the block
344 // into the cache without having a writeable copy (or any copy at
345 // all).
346 if (pkt->cmd == MemCmd::Writeback) {
347 assert(blkSize == pkt->getSize());
348 if (blk == NULL) {
349 // need to do a replacement
350 blk = allocateBlock(pkt->getAddr(), writebacks);
351 if (blk == NULL) {
352 // no replaceable block available, give up.
353 // writeback will be forwarded to next level.
354 incMissCount(pkt, id);
355 return false;
356 }
357 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
358 tags->insertBlock(pkt->getAddr(), blk, id);
359 blk->status = BlkValid | BlkReadable;
360 }
361 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
362 blk->status |= BlkDirty;
363 if (pkt->isSupplyExclusive()) {
364 blk->status |= BlkWritable;
365 }
366 // nothing else to do; writeback doesn't expect response
367 assert(!pkt->needsResponse());
368 incHitCount(pkt, id);
369 return true;
370 }
371
372 incMissCount(pkt, id);
373
374 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
375 // complete miss on store conditional... just give up now
376 pkt->req->setExtraData(0);
377 return true;
378 }
379
380 return false;
381 }
382
383
384 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
385 {
386 Packet::SenderState *prevSenderState;
387 int prevSrc;
388 #ifndef NDEBUG
389 BaseCache *cache;
390 #endif
391 public:
392 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
393 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
394 #ifndef NDEBUG
395 , cache(_cache)
396 #endif
397 {}
398 void restore(Packet *pkt, BaseCache *_cache)
399 {
400 assert(_cache == cache);
401 pkt->senderState = prevSenderState;
402 pkt->setDest(prevSrc);
403 }
404 };
405
406
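/**
 * Handle a timing-mode packet from the CPU side. Forwarded responses
 * go straight out the memory-side port; mem-inhibited requests are
 * dropped here (after an express snoop downward on invalidates);
 * uncacheable accesses go to the write buffer or an uncached-read
 * MSHR; everything else is either satisfied by access() or allocated
 * an MSHR / write buffer entry, with any victim writebacks queued in
 * the write buffer.
 */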
407 template<class TagStore>
408 bool
409 Cache<TagStore>::timingAccess(PacketPtr pkt)
410 {
411 //@todo Add back in MemDebug Calls
412 // MemDebug::cacheAccess(pkt);
413
414 // we charge hitLatency for doing just about anything here
415 Tick time = curTick() + hitLatency;
416
417 if (pkt->isResponse()) {
418 // must be cache-to-cache response from upper to lower level
419 ForwardResponseRecord *rec =
420 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
421 assert(rec != NULL);
422 rec->restore(pkt, this);
423 delete rec;
424 memSidePort->respond(pkt, time);
425 return true;
426 }
427
428 assert(pkt->isRequest());
429
430 if (pkt->memInhibitAsserted()) {
431 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
432 pkt->getAddr());
433 assert(!pkt->req->isUncacheable());
434 // Special tweak for multilevel coherence: snoop downward here
435 // on invalidates since there may be other caches below here
436 // that have shared copies. Not necessary if we know that
437 // supplier had exclusive copy to begin with.
438 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
439 Packet *snoopPkt = new Packet(pkt, true); // clear flags
440 snoopPkt->setExpressSnoop();
441 snoopPkt->assertMemInhibit();
442 memSidePort->sendTiming(snoopPkt);
443 // main memory will delete snoopPkt
444 }
445 // since we're the official target but we aren't responding,
446 // delete the packet now.
447 delete pkt;
448 return true;
449 }
450
451 if (pkt->req->isUncacheable()) {
452 if (pkt->req->isClearLL()) {
453 tags->clearLocks();
454 } else {
455 BlkType *blk = tags->findBlock(pkt->getAddr());
456 if (blk != NULL) {
457 tags->invalidateBlk(blk);
458 }
459 }
460
461 // writes go in write buffer, reads use MSHR
462 if (pkt->isWrite() && !pkt->isRead()) {
463 allocateWriteBuffer(pkt, time, true);
464 } else {
465 allocateUncachedReadBuffer(pkt, time, true);
466 }
467 assert(pkt->needsResponse()); // else we should delete it here??
468 return true;
469 }
470
471 int lat = hitLatency;
472 BlkType *blk = NULL;
473 PacketList writebacks;
474
475 bool satisfied = access(pkt, blk, lat, writebacks);
476
477 #if 0
478 /** @todo make the fast write alloc (wh64) work with coherence. */
479
480 // If this is a block size write/hint (WH64) allocate the block here
481 // if the coherence protocol allows it.
482 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
483 (pkt->cmd == MemCmd::WriteReq
484 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
485 // not outstanding misses, can do this
486 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
487 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
488 if (outstanding_miss) {
489 warn("WriteInv doing a fastallocate"
490 "with an outstanding miss to the same address\n");
491 }
492 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
493 writebacks);
494 ++fastWrites;
495 }
496 }
497 #endif
498
499 // track time of availability of next prefetch, if any
500 Tick next_pf_time = 0;
501
502 bool needsResponse = pkt->needsResponse();
503
504 if (satisfied) {
505 if (needsResponse) {
506 pkt->makeTimingResponse();
507 cpuSidePort->respond(pkt, curTick()+lat);
508 } else {
509 delete pkt;
510 }
511
512 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
513 if (blk)
514 blk->status &= ~BlkHWPrefetched;
515 next_pf_time = prefetcher->notify(pkt, time);
516 }
517 } else {
518 // miss
519
520 Addr blk_addr = blockAlign(pkt->getAddr());
521 MSHR *mshr = mshrQueue.findMatch(blk_addr);
522
523 if (mshr) {
524 // MSHR hit
525 //@todo remove hw_pf here
526 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
527 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
528 mshr->threadNum = -1;
529 }
530 mshr->allocateTarget(pkt, time, order++);
531 if (mshr->getNumTargets() == numTarget) {
532 noTargetMSHR = mshr;
533 setBlocked(Blocked_NoTargets);
534 // need to be careful with this... if this mshr isn't
535 // ready yet (i.e. time > curTick()), we don't want to
536 // move it ahead of mshrs that are ready
537 // mshrQueue.moveToFront(mshr);
538 }
539 } else {
540 // no MSHR
541 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
542 // always mark as cache fill for now... if we implement
543 // no-write-allocate or bypass accesses this will have to
544 // be changed.
545 if (pkt->cmd == MemCmd::Writeback) {
546 allocateWriteBuffer(pkt, time, true);
547 } else {
548 if (blk && blk->isValid()) {
549 // If we have a write miss to a valid block, we
550 // need to mark the block non-readable. Otherwise
551 // if we allow reads while there's an outstanding
552 // write miss, the read could return stale data
553 // out of the cache block... a more aggressive
554 // system could detect the overlap (if any) and
555 // forward data out of the MSHRs, but we don't do
556 // that yet. Note that we do need to leave the
557 // block valid so that it stays in the cache, in
558 // case we get an upgrade response (and hence no
559 // new data) when the write miss completes.
560 // As long as CPUs do proper store/load forwarding
561 // internally, and have a sufficiently weak memory
562 // model, this is probably unnecessary, but at some
563 // point it must have seemed like we needed it...
564 assert(pkt->needsExclusive() && !blk->isWritable());
565 blk->status &= ~BlkReadable;
566 }
567
568 allocateMissBuffer(pkt, time, true);
569 }
570
571 if (prefetcher) {
572 next_pf_time = prefetcher->notify(pkt, time);
573 }
574 }
575 }
576
577 if (next_pf_time != 0)
578 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
579
580 // copy writebacks to write buffer
581 while (!writebacks.empty()) {
582 PacketPtr wbPkt = writebacks.front();
583 allocateWriteBuffer(wbPkt, time, true);
584 writebacks.pop_front();
585 }
586
587 return true;
588 }
589
590
591 // See comment in cache.hh.
592 template<class TagStore>
593 PacketPtr
594 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
595 bool needsExclusive)
596 {
597 bool blkValid = blk && blk->isValid();
598
599 if (cpu_pkt->req->isUncacheable()) {
600 //assert(blk == NULL);
601 return NULL;
602 }
603
604 if (!blkValid &&
605 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
606 // Writebacks that weren't allocated in access() and upgrades
607 // from upper-level caches that missed completely just go
608 // through.
609 return NULL;
610 }
611
612 assert(cpu_pkt->needsResponse());
613
614 MemCmd cmd;
615 // @TODO make useUpgrades a parameter.
616 // Note that ownership protocols require upgrade, otherwise a
617 // write miss on a shared owned block will generate a ReadExcl,
618 // which will clobber the owned copy.
619 const bool useUpgrades = true;
620 if (blkValid && useUpgrades) {
621 // only reason to be here is that blk is shared
622 // (read-only) and we need exclusive
623 assert(needsExclusive && !blk->isWritable());
624 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
625 } else {
626 // block is invalid
627 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
628 }
629 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
630
631 pkt->allocate();
632 return pkt;
633 }
634
635
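/**
 * Handle an atomic-mode access. A miss sends the bus packet from
 * getBusPacket() (or the original packet, if it only needs
 * forwarding) down the memory side, fills the block from the
 * response when appropriate, and then satisfies the original
 * request. Returns the accumulated latency.
 */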
636 template<class TagStore>
637 Tick
638 Cache<TagStore>::atomicAccess(PacketPtr pkt)
639 {
640 int lat = hitLatency;
641
642 // @TODO: make this a parameter
643 bool last_level_cache = false;
644
645 if (pkt->memInhibitAsserted()) {
646 assert(!pkt->req->isUncacheable());
647 // have to invalidate ourselves and any lower caches even if
648 // upper cache will be responding
649 if (pkt->isInvalidate()) {
650 BlkType *blk = tags->findBlock(pkt->getAddr());
651 if (blk && blk->isValid()) {
652 tags->invalidateBlk(blk);
653 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
654 pkt->cmdString(), pkt->getAddr());
655 }
656 if (!last_level_cache) {
657 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
658 pkt->cmdString(), pkt->getAddr());
659 lat += memSidePort->sendAtomic(pkt);
660 }
661 } else {
662 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
663 pkt->cmdString(), pkt->getAddr());
664 }
665
666 return lat;
667 }
668
669 // should assert here that there are no outstanding MSHRs or
670 // writebacks... that would mean that someone used an atomic
671 // access in timing mode
672
673 BlkType *blk = NULL;
674 PacketList writebacks;
675
676 if (!access(pkt, blk, lat, writebacks)) {
677 // MISS
678 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
679
680 bool is_forward = (bus_pkt == NULL);
681
682 if (is_forward) {
683 // just forwarding the same request to the next level
684 // no local cache operation involved
685 bus_pkt = pkt;
686 }
687
688 DPRINTF(Cache, "Sending an atomic %s for %x\n",
689 bus_pkt->cmdString(), bus_pkt->getAddr());
690
691 #if TRACING_ON
692 CacheBlk::State old_state = blk ? blk->status : 0;
693 #endif
694
695 lat += memSidePort->sendAtomic(bus_pkt);
696
697 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
698 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
699
700 assert(!bus_pkt->wasNacked());
701
702 // If packet was a forward, the response (if any) is already
703 // in place in the bus_pkt == pkt structure, so we don't need
704 // to do anything. Otherwise, use the separate bus_pkt to
705 // generate response to pkt and then delete it.
706 if (!is_forward) {
707 if (pkt->needsResponse()) {
708 assert(bus_pkt->isResponse());
709 if (bus_pkt->isError()) {
710 pkt->makeAtomicResponse();
711 pkt->copyError(bus_pkt);
712 } else if (bus_pkt->isRead() ||
713 bus_pkt->cmd == MemCmd::UpgradeResp) {
714 // we're updating cache state to allow us to
715 // satisfy the upstream request from the cache
716 blk = handleFill(bus_pkt, blk, writebacks);
717 satisfyCpuSideRequest(pkt, blk);
718 } else {
719 // we're satisfying the upstream request without
720 // modifying cache state, e.g., a write-through
721 pkt->makeAtomicResponse();
722 }
723 }
724 delete bus_pkt;
725 }
726 }
727
728 // Note that we don't invoke the prefetcher at all in atomic mode.
729 // It's not clear how to do it properly, particularly for
730 // prefetchers that aggressively generate prefetch candidates and
731 // rely on bandwidth contention to throttle them; these will tend
732 // to pollute the cache in atomic mode since there is no bandwidth
733 // contention. If we ever do want to enable prefetching in atomic
734 // mode, though, this is the place to do it... see timingAccess()
735 // for an example (though we'd want to issue the prefetch(es)
736 // immediately rather than calling requestMemSideBus() as we do
737 // there).
738
739 // Handle writebacks if needed
740 while (!writebacks.empty()){
741 PacketPtr wbPkt = writebacks.front();
742 memSidePort->sendAtomic(wbPkt);
743 writebacks.pop_front();
744 delete wbPkt;
745 }
746
747 // We now have the block one way or another (hit or completed miss)
748
749 if (pkt->needsResponse()) {
750 pkt->makeAtomicResponse();
751 }
752
753 return lat;
754 }
755
756
757 template<class TagStore>
758 void
759 Cache<TagStore>::functionalAccess(PacketPtr pkt,
760 CachePort *incomingPort,
761 CachePort *otherSidePort)
762 {
763 Addr blk_addr = blockAlign(pkt->getAddr());
764 BlkType *blk = tags->findBlock(pkt->getAddr());
765 MSHR *mshr = mshrQueue.findMatch(blk_addr);
766
767 pkt->pushLabel(name());
768
769 CacheBlkPrintWrapper cbpw(blk);
770
771 // Note that just because an L2/L3 has valid data doesn't mean an
772 // L1 doesn't have a more up-to-date modified copy that still
773 // needs to be found. As a result we always update the request if
774 // we have it, but only declare it satisfied if we are the owner.
775
776 // see if we have data at all (owned or otherwise)
777 bool have_data = blk && blk->isValid()
778 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
779
780 // data we have is dirty if marked as such or if valid & ownership
781 // pending due to outstanding UpgradeReq
782 bool have_dirty =
783 have_data && (blk->isDirty() ||
784 (mshr && mshr->inService && mshr->isPendingDirty()));
785
786 bool done = have_dirty
787 || incomingPort->checkFunctional(pkt)
788 || mshrQueue.checkFunctional(pkt, blk_addr)
789 || writeBuffer.checkFunctional(pkt, blk_addr)
790 || otherSidePort->checkFunctional(pkt);
791
792 DPRINTF(Cache, "functional %s %x %s%s%s\n",
793 pkt->cmdString(), pkt->getAddr(),
794 (blk && blk->isValid()) ? "valid " : "",
795 have_data ? "data " : "", done ? "done " : "");
796
797 // We're leaving the cache, so pop cache->name() label
798 pkt->popLabel();
799
800 if (done) {
801 pkt->makeResponse();
802 } else {
803 otherSidePort->sendFunctional(pkt);
804 }
805 }
806
807
808 /////////////////////////////////////////////////////
809 //
810 // Response handling: responses from the memory side
811 //
812 /////////////////////////////////////////////////////
813
814
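/**
 * Process a response from the memory side: perform the cache fill if
 * this response completes one, service each target queued on the
 * MSHR (CPU requests, prefetches, deferred snoops), apply any
 * pending invalidation or downgrade to the block, and then either
 * deallocate the MSHR or re-request the bus for its promoted
 * deferred targets.
 */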
815 template<class TagStore>
816 void
817 Cache<TagStore>::handleResponse(PacketPtr pkt)
818 {
819 Tick time = curTick() + hitLatency;
820 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
821 bool is_error = pkt->isError();
822
823 assert(mshr);
824
825 if (pkt->wasNacked()) {
826 //pkt->reinitFromRequest();
827 warn("NACKs from devices not connected to the same bus "
828 "not implemented\n");
829 return;
830 }
831 if (is_error) {
832 DPRINTF(Cache, "Cache received packet with error for address %x, "
833 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
834 }
835
836 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
837
838 MSHRQueue *mq = mshr->queue;
839 bool wasFull = mq->isFull();
840
841 if (mshr == noTargetMSHR) {
842 // we always clear at least one target
843 clearBlocked(Blocked_NoTargets);
844 noTargetMSHR = NULL;
845 }
846
847 // Initial target is used just for stats
848 MSHR::Target *initial_tgt = mshr->getTarget();
849 BlkType *blk = tags->findBlock(pkt->getAddr());
850 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
851 Tick miss_latency = curTick() - initial_tgt->recvTime;
852 PacketList writebacks;
853
854 if (pkt->req->isUncacheable()) {
855 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
856 miss_latency;
857 } else {
858 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
859 miss_latency;
860 }
861
862 bool is_fill = !mshr->isForward &&
863 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
864
865 if (is_fill && !is_error) {
866 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
867 pkt->getAddr());
868
869 // give mshr a chance to do some dirty work
870 mshr->handleFill(pkt, blk);
871
872 blk = handleFill(pkt, blk, writebacks);
873 assert(blk != NULL);
874 }
875
876 // First offset for critical word first calculations
877 int initial_offset = 0;
878
879 if (mshr->hasTargets()) {
880 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
881 }
882
883 while (mshr->hasTargets()) {
884 MSHR::Target *target = mshr->getTarget();
885
886 switch (target->source) {
887 case MSHR::Target::FromCPU:
888 Tick completion_time;
889 if (is_fill) {
890 satisfyCpuSideRequest(target->pkt, blk,
891 true, mshr->hasPostDowngrade());
892 // How many bytes past the first request is this one
893 int transfer_offset =
894 target->pkt->getOffset(blkSize) - initial_offset;
895 if (transfer_offset < 0) {
896 transfer_offset += blkSize;
897 }
898
899 // If critical word (no offset) return first word time
900 completion_time = tags->getHitLatency() +
901 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
902
903 assert(!target->pkt->req->isUncacheable());
904 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
905 completion_time - target->recvTime;
906 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
907 // failed StoreCond upgrade
908 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
909 target->pkt->cmd == MemCmd::StoreCondFailReq ||
910 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
911 completion_time = tags->getHitLatency() + pkt->finishTime;
912 target->pkt->req->setExtraData(0);
913 } else {
914 // not a cache fill, just forwarding response
915 completion_time = tags->getHitLatency() + pkt->finishTime;
916 if (pkt->isRead() && !is_error) {
917 target->pkt->setData(pkt->getPtr<uint8_t>());
918 }
919 }
920 target->pkt->makeTimingResponse();
921 // if this packet is an error copy that to the new packet
922 if (is_error)
923 target->pkt->copyError(pkt);
924 if (target->pkt->cmd == MemCmd::ReadResp &&
925 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
926 // If intermediate cache got ReadRespWithInvalidate,
927 // propagate that. Response should not have
928 // isInvalidate() set otherwise.
929 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
930 }
931 cpuSidePort->respond(target->pkt, completion_time);
932 break;
933
934 case MSHR::Target::FromPrefetcher:
935 assert(target->pkt->cmd == MemCmd::HardPFReq);
936 if (blk)
937 blk->status |= BlkHWPrefetched;
938 delete target->pkt->req;
939 delete target->pkt;
940 break;
941
942 case MSHR::Target::FromSnoop:
943 // I don't believe that a snoop can be in an error state
944 assert(!is_error);
945 // response to snoop request
946 DPRINTF(Cache, "processing deferred snoop...\n");
947 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
948 handleSnoop(target->pkt, blk, true, true,
949 mshr->hasPostInvalidate());
950 break;
951
952 default:
953 panic("Illegal target->source enum %d\n", target->source);
954 }
955
956 mshr->popTarget();
957 }
958
959 if (blk) {
960 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
961 tags->invalidateBlk(blk);
962 } else if (mshr->hasPostDowngrade()) {
963 blk->status &= ~BlkWritable;
964 }
965 }
966
967 if (mshr->promoteDeferredTargets()) {
968 // avoid later read getting stale data while write miss is
969 // outstanding.. see comment in timingAccess()
970 if (blk) {
971 blk->status &= ~BlkReadable;
972 }
973 MSHRQueue *mq = mshr->queue;
974 mq->markPending(mshr);
975 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
976 } else {
977 mq->deallocate(mshr);
978 if (wasFull && !mq->isFull()) {
979 clearBlocked((BlockedCause)mq->index);
980 }
981 }
982
983 // copy writebacks to write buffer
984 while (!writebacks.empty()) {
985 PacketPtr wbPkt = writebacks.front();
986 allocateWriteBuffer(wbPkt, time, true);
987 writebacks.pop_front();
988 }
989 // if we used temp block, clear it out
990 if (blk == tempBlock) {
991 if (blk->isDirty()) {
992 allocateWriteBuffer(writebackBlk(blk), time, true);
993 }
994 tags->invalidateBlk(blk);
995 }
996
997 delete pkt;
998 }
999
1000
1001
1002
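/**
 * Build a Writeback packet for a dirty block: the block's data is
 * copied into the packet, exclusive supply is flagged if we hold a
 * writable copy, and the block's dirty bit is cleared locally.
 */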
1003 template<class TagStore>
1004 PacketPtr
1005 Cache<TagStore>::writebackBlk(BlkType *blk)
1006 {
1007 assert(blk && blk->isValid() && blk->isDirty());
1008
1009 writebacks[0/*pkt->req->threadId()*/]++;
1010
1011 Request *writebackReq =
1012 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
1013 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
1014 if (blk->isWritable()) {
1015 writeback->setSupplyExclusive();
1016 }
1017 writeback->allocate();
1018 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1019
1020 blk->status &= ~BlkDirty;
1021 return writeback;
1022 }
1023
1024
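/**
 * Pick a victim block for addr. If the victim is the target of an
 * outstanding upgrade MSHR the allocation fails and NULL is
 * returned; otherwise a dirty victim's writeback packet is appended
 * to writebacks and the victim is returned for reuse.
 */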
1025 template<class TagStore>
1026 typename Cache<TagStore>::BlkType*
1027 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1028 {
1029 BlkType *blk = tags->findVictim(addr, writebacks);
1030
1031 if (blk->isValid()) {
1032 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1033 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1034 if (repl_mshr) {
1035 // must be an outstanding upgrade request on block
1036 // we're about to replace...
1037 assert(!blk->isWritable());
1038 assert(repl_mshr->needsExclusive());
1039 // too hard to replace block with transient state
1040 // allocation failed, block not inserted
1041 return NULL;
1042 } else {
1043 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1044 repl_addr, addr,
1045 blk->isDirty() ? "writeback" : "clean");
1046
1047 if (blk->isDirty()) {
1048 // Save writeback packet for handling by caller
1049 writebacks.push_back(writebackBlk(blk));
1050 }
1051 }
1052 }
1053
1054 return blk;
1055 }
1056
1057
1058 // Note that the reason we return a list of writebacks rather than
1059 // inserting them directly in the write buffer is that this function
1060 // is called by both atomic and timing-mode accesses, and in atomic
1061 // mode we don't mess with the write buffer (we just perform the
1062 // writebacks atomically once the original request is complete).
1063 template<class TagStore>
1064 typename Cache<TagStore>::BlkType*
1065 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1066 PacketList &writebacks)
1067 {
1068 Addr addr = pkt->getAddr();
1069 #if TRACING_ON
1070 CacheBlk::State old_state = blk ? blk->status : 0;
1071 #endif
1072
1073 if (blk == NULL) {
1074 // better have read new data...
1075 assert(pkt->hasData());
1076 // need to do a replacement
1077 blk = allocateBlock(addr, writebacks);
1078 if (blk == NULL) {
1079 // No replaceable block... just use temporary storage to
1080 // complete the current request and then get rid of it
1081 assert(!tempBlock->isValid());
1082 blk = tempBlock;
1083 tempBlock->set = tags->extractSet(addr);
1084 tempBlock->tag = tags->extractTag(addr);
1085 DPRINTF(Cache, "using temp block for %x\n", addr);
1086 } else {
1087 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
1088 tags->insertBlock(pkt->getAddr(), blk, id);
1089 }
1090
1091 // starting from scratch with a new block
1092 blk->status = 0;
1093 } else {
1094 // existing block... probably an upgrade
1095 assert(blk->tag == tags->extractTag(addr));
1096 // either we're getting new data or the block should already be valid
1097 assert(pkt->hasData() || blk->isValid());
1098 // don't clear block status... if block is already dirty we
1099 // don't want to lose that
1100 }
1101
1102 blk->status |= BlkValid | BlkReadable;
1103
1104 if (!pkt->sharedAsserted()) {
1105 blk->status |= BlkWritable;
1106 // If we got this via cache-to-cache transfer (i.e., from a
1107 // cache that was an owner) and took away that owner's copy,
1108 // then we need to write it back. Normally this happens
1109 // anyway as a side effect of getting a copy to write it, but
1110 // there are cases (such as failed store conditionals or
1111 // compare-and-swaps) where we'll demand an exclusive copy but
1112 // end up not writing it.
1113 if (pkt->memInhibitAsserted())
1114 blk->status |= BlkDirty;
1115 }
1116
1117 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1118 addr, old_state, blk->status);
1119
1120 // if we got new data, copy it in
1121 if (pkt->isRead()) {
1122 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1123 }
1124
1125 blk->whenReady = pkt->finishTime;
1126
1127 return blk;
1128 }
1129
1130
1131 /////////////////////////////////////////////////////
1132 //
1133 // Snoop path: requests coming in from the memory side
1134 //
1135 /////////////////////////////////////////////////////
1136
1137 template<class TagStore>
1138 void
1139 Cache<TagStore>::
1140 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1141 bool already_copied, bool pending_inval)
1142 {
1143 // timing-mode snoop responses require a new packet, unless we
1144 // already made a copy...
1145 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1146 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1147 pkt->allocate();
1148 pkt->makeTimingResponse();
1149 if (pkt->isRead()) {
1150 pkt->setDataFromBlock(blk_data, blkSize);
1151 }
1152 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1153 // Assume we defer a response to a read from a far-away cache
1154 // A, then later defer a ReadExcl from a cache B on the same
1155 // bus as us. We'll assert MemInhibit in both cases, but in
1156 // the latter case MemInhibit will keep the invalidation from
1157 // reaching cache A. This special response tells cache A that
1158 // it gets the block to satisfy its read, but must immediately
1159 // invalidate it.
1160 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1161 }
1162 memSidePort->respond(pkt, curTick() + hitLatency);
1163 }
1164
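/**
 * Common snoop handling for the timing and atomic paths: the snoop
 * is first forwarded to any caches above us, then, if we hold a
 * valid copy, it is downgraded or invalidated as required and we
 * supply data (asserting MemInhibit) when the block is dirty and a
 * response is needed.
 */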
1165 template<class TagStore>
1166 void
1167 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1168 bool is_timing, bool is_deferred,
1169 bool pending_inval)
1170 {
1171 // deferred snoops can only happen in timing mode
1172 assert(!(is_deferred && !is_timing));
1173 // pending_inval only makes sense on deferred snoops
1174 assert(!(pending_inval && !is_deferred));
1175 assert(pkt->isRequest());
1176
1177 // the packet may get modified if we or a forwarded snooper
1178 // responds in atomic mode, so remember a few things about the
1179 // original packet up front
1180 bool invalidate = pkt->isInvalidate();
1181 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1182
1183 if (forwardSnoops) {
1184 // first propagate snoop upward to see if anyone above us wants to
1185 // handle it. save & restore packet src since it will get
1186 // rewritten to be relative to cpu-side bus (if any)
1187 bool alreadyResponded = pkt->memInhibitAsserted();
1188 if (is_timing) {
1189 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1190 snoopPkt->setExpressSnoop();
1191 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1192 cpuSidePort->sendTiming(snoopPkt);
1193 if (snoopPkt->memInhibitAsserted()) {
1194 // cache-to-cache response from some upper cache
1195 assert(!alreadyResponded);
1196 pkt->assertMemInhibit();
1197 } else {
1198 delete snoopPkt->senderState;
1199 }
1200 if (snoopPkt->sharedAsserted()) {
1201 pkt->assertShared();
1202 }
1203 delete snoopPkt;
1204 } else {
1205 int origSrc = pkt->getSrc();
1206 cpuSidePort->sendAtomic(pkt);
1207 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1208 // cache-to-cache response from some upper cache:
1209 // forward response to original requester
1210 assert(pkt->isResponse());
1211 }
1212 pkt->setSrc(origSrc);
1213 }
1214 }
1215
1216 if (!blk || !blk->isValid()) {
1217 return;
1218 }
1219
1220 // we may end up modifying both the block state and the packet (if
1221 // we respond in atomic mode), so just figure out what to do now
1222 // and then do it later
1223 bool respond = blk->isDirty() && pkt->needsResponse();
1224 bool have_exclusive = blk->isWritable();
1225
1226 if (pkt->isRead() && !invalidate) {
1227 assert(!needs_exclusive);
1228 pkt->assertShared();
1229 int bits_to_clear = BlkWritable;
1230 const bool haveOwnershipState = true; // for now
1231 if (!haveOwnershipState) {
1232 // if we don't support pure ownership (dirty && !writable),
1233 // have to clear dirty bit here, assume memory snarfs data
1234 // on cache-to-cache xfer
1235 bits_to_clear |= BlkDirty;
1236 }
1237 blk->status &= ~bits_to_clear;
1238 }
1239
1240 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1241 pkt->cmdString(), blockAlign(pkt->getAddr()),
1242 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1243
1244 if (respond) {
1245 assert(!pkt->memInhibitAsserted());
1246 pkt->assertMemInhibit();
1247 if (have_exclusive) {
1248 pkt->setSupplyExclusive();
1249 }
1250 if (is_timing) {
1251 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1252 } else {
1253 pkt->makeAtomicResponse();
1254 pkt->setDataFromBlock(blk->data, blkSize);
1255 }
1256 } else if (is_timing && is_deferred) {
1257 // if it's a deferred timing snoop then we've made a copy of
1258 // the packet, and so if we're not using that copy to respond
1259 // then we need to delete it here.
1260 delete pkt;
1261 }
1262
1263 // Do this last in case it deallocates block data or something
1264 // like that
1265 if (invalidate) {
1266 tags->invalidateBlk(blk);
1267 }
1268 }
1269
1270
1271 template<class TagStore>
1272 void
1273 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1274 {
1275 // Note that some deferred snoops don't have requests, since the
1276 // original access may have already completed
1277 if ((pkt->req && pkt->req->isUncacheable()) ||
1278 pkt->cmd == MemCmd::Writeback) {
1279 //Can't get a hit on an uncacheable address
1280 //Revisit this for multi level coherence
1281 return;
1282 }
1283
1284 BlkType *blk = tags->findBlock(pkt->getAddr());
1285
1286 Addr blk_addr = blockAlign(pkt->getAddr());
1287 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1288
1289 // Let the MSHR itself track the snoop and decide whether we want
1290 // to go ahead and do the regular cache snoop
1291 if (mshr && mshr->handleSnoop(pkt, order++)) {
1292 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1293 blk_addr);
1294 if (mshr->getNumTargets() > numTarget)
1295 warn("allocating bonus target for snoop"); //handle later
1296 return;
1297 }
1298
1299 //We also need to check the writeback buffers and handle those
1300 std::vector<MSHR *> writebacks;
1301 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1302 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1303 pkt->getAddr());
1304
1305 //Look through writebacks for any cacheable writes, use that
1306 for (int i = 0; i < writebacks.size(); i++) {
1307 mshr = writebacks[i];
1308 assert(!mshr->isUncacheable());
1309 assert(mshr->getNumTargets() == 1);
1310 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1311 assert(wb_pkt->cmd == MemCmd::Writeback);
1312
1313 assert(!pkt->memInhibitAsserted());
1314 pkt->assertMemInhibit();
1315 if (!pkt->needsExclusive()) {
1316 pkt->assertShared();
1317 // the writeback is no longer the exclusive copy in the system
1318 wb_pkt->clearSupplyExclusive();
1319 } else {
1320 // if we're not asserting the shared line, we need to
1321 // invalidate our copy. we'll do that below as long as
1322 // the packet's invalidate flag is set...
1323 assert(pkt->isInvalidate());
1324 }
1325 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1326 false, false);
1327
1328 if (pkt->isInvalidate()) {
1329 // Invalidation trumps our writeback... discard here
1330 markInService(mshr);
1331 delete wb_pkt;
1332 }
1333
1334 // If this was a shared writeback, there may still be
1335 // other shared copies above that require invalidation.
1336 // We could be more selective and return here if the
1337 // request is non-exclusive or if the writeback is
1338 // exclusive.
1339 break;
1340 }
1341 }
1342
1343 handleSnoop(pkt, blk, true, false, false);
1344 }
1345
1346
1347 template<class TagStore>
1348 Tick
1349 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1350 {
1351 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1352 // Can't get a hit on an uncacheable address
1353 // Revisit this for multi level coherence
1354 return hitLatency;
1355 }
1356
1357 BlkType *blk = tags->findBlock(pkt->getAddr());
1358 handleSnoop(pkt, blk, false, false, false);
1359 return hitLatency;
1360 }
1361
1362
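/**
 * Choose the next MSHR to service, arbitrating between the miss
 * queue and the write buffer (favoring misses unless the write
 * buffer is full) and falling back to a prefetch candidate when
 * neither has anything pending.
 */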
1363 template<class TagStore>
1364 MSHR *
1365 Cache<TagStore>::getNextMSHR()
1366 {
1367 // Check both MSHR queue and write buffer for potential requests
1368 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1369 MSHR *write_mshr = writeBuffer.getNextMSHR();
1370
1371 // Now figure out which one to send... some cases are easy
1372 if (miss_mshr && !write_mshr) {
1373 return miss_mshr;
1374 }
1375 if (write_mshr && !miss_mshr) {
1376 return write_mshr;
1377 }
1378
1379 if (miss_mshr && write_mshr) {
1380 // We have one of each... normally we favor the miss request
1381 // unless the write buffer is full
1382 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1383 // Write buffer is full, so we'd like to issue a write;
1384 // need to search MSHR queue for conflicting earlier miss.
1385 MSHR *conflict_mshr =
1386 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1387
1388 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1389 // Service misses in order until conflict is cleared.
1390 return conflict_mshr;
1391 }
1392
1393 // No conflicts; issue write
1394 return write_mshr;
1395 }
1396
1397 // Write buffer isn't full, but need to check it for
1398 // conflicting earlier writeback
1399 MSHR *conflict_mshr =
1400 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1401 if (conflict_mshr) {
1402 // not sure why we don't check order here... it was in the
1403 // original code but commented out.
1404
1405 // The only way this happens is if we are
1406 // doing a write and we didn't have permissions
1407 // then subsequently saw a writeback (owned got evicted)
1408 // We need to make sure to perform the writeback first
1409 // To preserve the dirty data, then we can issue the write
1410
1411 // should we return write_mshr here instead? I.e. do we
1412 // have to flush writes in order? I don't think so... not
1413 // for Alpha anyway. Maybe for x86?
1414 return conflict_mshr;
1415 }
1416
1417 // No conflicts; issue read
1418 return miss_mshr;
1419 }
1420
1421 // fall through... no pending requests. Try a prefetch.
1422 assert(!miss_mshr && !write_mshr);
1423 if (prefetcher && !mshrQueue.isFull()) {
1424 // If we have a miss queue slot, we can try a prefetch
1425 PacketPtr pkt = prefetcher->getPacket();
1426 if (pkt) {
1427 Addr pf_addr = blockAlign(pkt->getAddr());
1428 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
1429 // Update statistic on number of prefetches issued
1430 // (hwpf_mshr_misses)
1431 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1432 // Don't request bus, since we already have it
1433 return allocateMissBuffer(pkt, curTick(), false);
1434 }
1435 }
1436 }
1437
1438 return NULL;
1439 }
1440
1441
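/**
 * Build the next packet to send on the memory side from the next
 * ready MSHR: failed store-conditional upgrades are completed
 * immediately with an UpgradeFailResp, forward-only requests are
 * sent as-is, and anything else gets a bus packet from
 * getBusPacket() (or a forwarded copy if no cache fill is needed).
 */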
1442 template<class TagStore>
1443 PacketPtr
1444 Cache<TagStore>::getTimingPacket()
1445 {
1446 MSHR *mshr = getNextMSHR();
1447
1448 if (mshr == NULL) {
1449 return NULL;
1450 }
1451
1452 // use request from 1st target
1453 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1454 PacketPtr pkt = NULL;
1455
1456 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1457 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1458 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1459 // in MSHR, so now that we are getting around to processing
1460 // it, just treat it as if we got a failure response
1461 pkt = new Packet(tgt_pkt);
1462 pkt->cmd = MemCmd::UpgradeFailResp;
1463 pkt->senderState = mshr;
1464 pkt->firstWordTime = pkt->finishTime = curTick();
1465 handleResponse(pkt);
1466 return NULL;
1467 } else if (mshr->isForwardNoResponse()) {
1468 // no response expected, just forward packet as it is
1469 assert(tags->findBlock(mshr->addr) == NULL);
1470 pkt = tgt_pkt;
1471 } else {
1472 BlkType *blk = tags->findBlock(mshr->addr);
1473 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1474
1475 mshr->isForward = (pkt == NULL);
1476
1477 if (mshr->isForward) {
1478 // not a cache block request, but a response is expected
1479 // make copy of current packet to forward, keep current
1480 // copy for response handling
1481 pkt = new Packet(tgt_pkt);
1482 pkt->allocate();
1483 if (pkt->isWrite()) {
1484 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1485 }
1486 }
1487 }
1488
1489 assert(pkt != NULL);
1490 pkt->senderState = mshr;
1491 return pkt;
1492 }
1493
1494
1495 template<class TagStore>
1496 Tick
1497 Cache<TagStore>::nextMSHRReadyTime()
1498 {
1499 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1500 writeBuffer.nextMSHRReadyTime());
1501
1502 if (prefetcher) {
1503 nextReady = std::min(nextReady,
1504 prefetcher->nextPrefetchReadyTime());
1505 }
1506
1507 return nextReady;
1508 }
1509
1510
1511 ///////////////
1512 //
1513 // CpuSidePort
1514 //
1515 ///////////////
1516
1517 template<class TagStore>
1518 void
1519 Cache<TagStore>::CpuSidePort::
1520 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1521 {
1522 // CPU side port doesn't snoop; it's a target only. It can
1523 // potentially respond to any address.
1524 snoop = false;
1525 resp.push_back(myCache()->getAddrRange());
1526 }
1527
1528
1529 template<class TagStore>
1530 bool
1531 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1532 {
1533 // illegal to block responses... can lead to deadlock
1534 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1535 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1536 mustSendRetry = true;
1537 return false;
1538 }
1539
1540 myCache()->timingAccess(pkt);
1541 return true;
1542 }
1543
1544
1545 template<class TagStore>
1546 Tick
1547 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1548 {
1549 return myCache()->atomicAccess(pkt);
1550 }
1551
1552
1553 template<class TagStore>
1554 void
1555 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1556 {
1557 myCache()->functionalAccess(pkt, this, otherPort);
1558 }
1559
1560
1561 template<class TagStore>
1562 Cache<TagStore>::
1563 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1564 const std::string &_label)
1565 : BaseCache::CachePort(_name, _cache, _label)
1566 {
1567 }
1568
1569 ///////////////
1570 //
1571 // MemSidePort
1572 //
1573 ///////////////
1574
1575 template<class TagStore>
1576 void
1577 Cache<TagStore>::MemSidePort::
1578 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1579 {
1580 // Memory-side port always snoops, but never passes requests
1581 // through to targets on the cpu side (so we don't add anything to
1582 // the address range list).
1583 snoop = true;
1584 }
1585
1586
1587 template<class TagStore>
1588 bool
1589 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1590 {
1591 // this needs to be fixed so that the cache updates the mshr and sends the
1592 // packet back out on the link, but that probably won't happen, so until this
1593 // gets fixed, just panic when it does
1594 if (pkt->wasNacked())
1595 panic("Need to implement cache resending nacked packets!\n");
1596
1597 if (pkt->isRequest() && blocked) {
1598 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1599 mustSendRetry = true;
1600 return false;
1601 }
1602
1603 if (pkt->isResponse()) {
1604 myCache()->handleResponse(pkt);
1605 } else {
1606 myCache()->snoopTiming(pkt);
1607 }
1608 return true;
1609 }
1610
1611
1612 template<class TagStore>
1613 Tick
1614 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1615 {
1616 // in atomic mode, responses go back to the sender via the
1617 // function return from sendAtomic(), not via a separate
1618 // sendAtomic() from the responder. Thus we should never see a
1619 // response packet in recvAtomic() (anywhere, not just here).
1620 assert(!pkt->isResponse());
1621 return myCache()->snoopAtomic(pkt);
1622 }
1623
1624
1625 template<class TagStore>
1626 void
1627 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1628 {
1629 myCache()->functionalAccess(pkt, this, otherPort);
1630 }
1631
1632
1633
1634 template<class TagStore>
1635 void
1636 Cache<TagStore>::MemSidePort::sendPacket()
1637 {
1638 // if we have responses that are ready, they take precedence
1639 if (deferredPacketReady()) {
1640 bool success = sendTiming(transmitList.front().pkt);
1641
1642 if (success) {
1643 //send successful, remove packet
1644 transmitList.pop_front();
1645 }
1646
1647 waitingOnRetry = !success;
1648 } else {
1649 // check for non-response packets (requests & writebacks)
1650 PacketPtr pkt = myCache()->getTimingPacket();
1651 if (pkt == NULL) {
1652 // can happen if e.g. we attempt a writeback and fail, but
1653 // before the retry, the writeback is eliminated because
1654 // we snoop another cache's ReadEx.
1655 waitingOnRetry = false;
1656 } else {
1657 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1658
1659 bool success = sendTiming(pkt);
1660
1661 waitingOnRetry = !success;
1662 if (waitingOnRetry) {
1663 DPRINTF(CachePort, "now waiting on a retry\n");
1664 if (!mshr->isForwardNoResponse()) {
1665 delete pkt;
1666 }
1667 } else {
1668 myCache()->markInService(mshr, pkt);
1669 }
1670 }
1671 }
1672
1673
1674 // tried to send packet... if it was successful (no retry), see if
1675 // we need to rerequest bus or not
1676 if (!waitingOnRetry) {
1677 Tick nextReady = std::min(deferredPacketReadyTime(),
1678 myCache()->nextMSHRReadyTime());
1679 // @TODO: need to factor in prefetch requests here somehow
1680 if (nextReady != MaxTick) {
1681 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1682 schedule(sendEvent, std::max(nextReady, curTick() + 1));
1683 } else {
1684 // no more to send right now: if we're draining, we may be done
1685 if (drainEvent && !sendEvent->scheduled()) {
1686 drainEvent->process();
1687 drainEvent = NULL;
1688 }
1689 }
1690 }
1691 }
1692
1693 template<class TagStore>
1694 void
1695 Cache<TagStore>::MemSidePort::recvRetry()
1696 {
1697 assert(waitingOnRetry);
1698 sendPacket();
1699 }
1700
1701
1702 template<class TagStore>
1703 void
1704 Cache<TagStore>::MemSidePort::processSendEvent()
1705 {
1706 assert(!waitingOnRetry);
1707 sendPacket();
1708 }
1709
1710
1711 template<class TagStore>
1712 Cache<TagStore>::
1713 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1714 const std::string &_label)
1715 : BaseCache::CachePort(_name, _cache, _label)
1716 {
1717 // override default send event from SimpleTimingPort
1718 delete sendEvent;
1719 sendEvent = new SendEvent(this);
1720 }