MEM: Differentiate functional cache accesses from CPU and memory
[gem5.git] src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "debug/CachePort.hh"
59 #include "mem/cache/prefetch/base.hh"
60 #include "mem/cache/blk.hh"
61 #include "mem/cache/cache.hh"
62 #include "mem/cache/mshr.hh"
63 #include "sim/sim_exit.hh"
64
65 template<class TagStore>
66 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
67 : BaseCache(p),
68 tags(tags),
69 prefetcher(pf),
70 doFastWrites(true),
71 prefetchOnAccess(p->prefetch_on_access)
72 {
73 tempBlock = new BlkType();
74 tempBlock->data = new uint8_t[blkSize];
75
76 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
77 "CpuSidePort");
78 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
79 "MemSidePort");
80 cpuSidePort->setOtherPort(memSidePort);
81 memSidePort->setOtherPort(cpuSidePort);
82
83 tags->setCache(this);
84 if (prefetcher)
85 prefetcher->setCache(this);
86 }
87
88 template<class TagStore>
89 void
90 Cache<TagStore>::regStats()
91 {
92 BaseCache::regStats();
93 tags->regStats(name());
94 if (prefetcher)
95 prefetcher->regStats(name());
96 }
97
98 template<class TagStore>
99 Port *
100 Cache<TagStore>::getPort(const std::string &if_name, int idx)
101 {
102 if (if_name == "" || if_name == "cpu_side") {
103 return cpuSidePort;
104 } else if (if_name == "mem_side") {
105 return memSidePort;
106 } else if (if_name == "functional") {
107 CpuSidePort *funcPort =
108 new CpuSidePort(name() + "-cpu_side_funcport", this,
109 "CpuSideFuncPort");
110 funcPort->setOtherPort(memSidePort);
111 return funcPort;
112 } else {
113 panic("Port name %s unrecognized\n", if_name);
114 }
115 }
116
117 template<class TagStore>
118 void
119 Cache<TagStore>::deletePortRefs(Port *p)
120 {
121 if (cpuSidePort == p || memSidePort == p)
122 panic("Can only delete functional ports\n");
123
124 delete p;
125 }
126
127
128 template<class TagStore>
129 void
130 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
131 {
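// Atomic read-modify-write (SwapReq): the bytes currently in the block
// are always returned to the requester, and the new value is written
// back only for an unconditional swap or when the conditional compare
// succeeds. Illustrative example (hypothetical values): a 64-bit
// CondSwap whose extra data is 0x1234 overwrites the line only if the
// eight bytes at the target offset currently equal 0x1234; either way
// the requester sees the old bytes in its response.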
132 uint64_t overwrite_val;
133 bool overwrite_mem;
134 uint64_t condition_val64;
135 uint32_t condition_val32;
136
137 int offset = tags->extractBlkOffset(pkt->getAddr());
138 uint8_t *blk_data = blk->data + offset;
139
140 assert(sizeof(uint64_t) >= pkt->getSize());
141
142 overwrite_mem = true;
143 // keep a copy of our possible write value, and copy what is at the
144 // memory address into the packet
145 pkt->writeData((uint8_t *)&overwrite_val);
146 pkt->setData(blk_data);
147
148 if (pkt->req->isCondSwap()) {
149 if (pkt->getSize() == sizeof(uint64_t)) {
150 condition_val64 = pkt->req->getExtraData();
151 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
152 sizeof(uint64_t));
153 } else if (pkt->getSize() == sizeof(uint32_t)) {
154 condition_val32 = (uint32_t)pkt->req->getExtraData();
155 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
156 sizeof(uint32_t));
157 } else
158 panic("Invalid size for conditional read/write\n");
159 }
160
161 if (overwrite_mem) {
162 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
163 blk->status |= BlkDirty;
164 }
165 }
166
167
168 template<class TagStore>
169 void
170 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
171 bool deferred_response,
172 bool pending_downgrade)
173 {
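// Fill the CPU-side packet from this block and update coherence state:
// swaps go through cmpAndSwap(), writes dirty the block, and reads may
// additionally hand out ownership/exclusivity (or give up the block
// entirely on ReadEx/upgrades), as described in the comments below.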
174 assert(blk && blk->isValid());
175 // Occasionally this is not true... if we are a lower-level cache
176 // satisfying a string of Read and ReadEx requests from
177 // upper-level caches, a Read will mark the block as shared but we
178 // can satisfy a following ReadEx anyway since we can rely on the
179 // Read requester(s) to have buffered the ReadEx snoop and to
180 // invalidate their blocks after receiving them.
181 // assert(!pkt->needsExclusive() || blk->isWritable());
182 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
183
184 // Check RMW operations first since both isRead() and
185 // isWrite() will be true for them
186 if (pkt->cmd == MemCmd::SwapReq) {
187 cmpAndSwap(blk, pkt);
188 } else if (pkt->isWrite()) {
189 if (blk->checkWrite(pkt)) {
190 pkt->writeDataToBlock(blk->data, blkSize);
191 blk->status |= BlkDirty;
192 }
193 } else if (pkt->isRead()) {
194 if (pkt->isLLSC()) {
195 blk->trackLoadLocked(pkt);
196 }
197 pkt->setDataFromBlock(blk->data, blkSize);
198 if (pkt->getSize() == blkSize) {
199 // special handling for coherent block requests from
200 // upper-level caches
201 if (pkt->needsExclusive()) {
202 // if we have a dirty copy, make sure the recipient
203 // keeps it marked dirty
204 if (blk->isDirty()) {
205 pkt->assertMemInhibit();
206 }
207 // on ReadExReq we give up our copy unconditionally
208 tags->invalidateBlk(blk);
209 } else if (blk->isWritable() && !pending_downgrade
210 && !pkt->sharedAsserted()) {
211 // we can give the requester an exclusive copy (by not
212 // asserting shared line) on a read request if:
213 // - we have an exclusive copy at this level (& below)
214 // - we don't have a pending snoop from below
215 // signaling another read request
216 // - no other cache above has a copy (otherwise it
217 * would have asserted shared line on request)
218
219 if (blk->isDirty()) {
220 // special considerations if we're owner:
221 if (!deferred_response && !isTopLevel) {
222 // if we are responding immediately and can
223 // signal that we're transferring ownership
224 // along with exclusivity, do so
225 pkt->assertMemInhibit();
226 blk->status &= ~BlkDirty;
227 } else {
228 // if we're responding after our own miss,
229 // there's a window where the recipient didn't
230 // know it was getting ownership and may not
231 // have responded to snoops correctly, so we
232 // can't pass off ownership *or* exclusivity
233 pkt->assertShared();
234 }
235 }
236 } else {
237 // otherwise only respond with a shared copy
238 pkt->assertShared();
239 }
240 }
241 } else {
242 // Not a read or write... must be an upgrade. it's OK
243 // to just ack those as long as we have an exclusive
244 // copy at this level.
245 assert(pkt->isUpgrade());
246 tags->invalidateBlk(blk);
247 }
248 }
249
250
251 /////////////////////////////////////////////////////
252 //
253 // MSHR helper functions
254 //
255 /////////////////////////////////////////////////////
256
257
258 template<class TagStore>
259 void
260 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
261 {
262 markInServiceInternal(mshr, pkt);
263 #if 0
264 if (mshr->originalCmd == MemCmd::HardPFReq) {
265 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
266 name());
267 //Also clear pending if need be
268 if (!prefetcher->havePending())
269 {
270 deassertMemSideBusRequest(Request_PF);
271 }
272 }
273 #endif
274 }
275
276
277 template<class TagStore>
278 void
279 Cache<TagStore>::squash(int threadNum)
280 {
281 bool unblock = false;
282 BlockedCause cause = NUM_BLOCKED_CAUSES;
283
284 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
285 noTargetMSHR = NULL;
286 unblock = true;
287 cause = Blocked_NoTargets;
288 }
289 if (mshrQueue.isFull()) {
290 unblock = true;
291 cause = Blocked_NoMSHRs;
292 }
293 mshrQueue.squash(threadNum);
294 if (unblock && !mshrQueue.isFull()) {
295 clearBlocked(cause);
296 }
297 }
298
299 /////////////////////////////////////////////////////
300 //
301 // Access path: requests coming in from the CPU side
302 //
303 /////////////////////////////////////////////////////
304
305 template<class TagStore>
306 bool
307 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
308 int &lat, PacketList &writebacks)
309 {
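// Returns true if the request can be satisfied here: a tag hit with
// sufficient permissions, a Writeback we can absorb, or a store
// conditional that fails on a complete miss. On a miss it returns
// false, leaving blk pointing at any matching (but unusable) block,
// lat set to the lookup latency, and any evicted victims appended to
// writebacks.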
310 if (pkt->req->isUncacheable()) {
311 if (pkt->req->isClearLL()) {
312 tags->clearLocks();
313 } else {
314 blk = tags->findBlock(pkt->getAddr());
315 if (blk != NULL) {
316 tags->invalidateBlk(blk);
317 }
318 }
319
320 blk = NULL;
321 lat = hitLatency;
322 return false;
323 }
324
325 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
326 blk = tags->accessBlock(pkt->getAddr(), lat, id);
327
328 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
329 pkt->req->isInstFetch() ? " (ifetch)" : "",
330 pkt->getAddr(), (blk) ? "hit" : "miss");
331
332 if (blk != NULL) {
333
334 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
335 // OK to satisfy access
336 incHitCount(pkt, id);
337 satisfyCpuSideRequest(pkt, blk);
338 return true;
339 }
340 }
341
342 // Can't satisfy access normally... either no block (blk == NULL)
343 // or have block but need exclusive & only have shared.
344
345 // Writeback handling is a special case. We can write the block
346 // into the cache without having a writeable copy (or any copy at
347 // all).
348 if (pkt->cmd == MemCmd::Writeback) {
349 assert(blkSize == pkt->getSize());
350 if (blk == NULL) {
351 // need to do a replacement
352 blk = allocateBlock(pkt->getAddr(), writebacks);
353 if (blk == NULL) {
354 // no replaceable block available, give up.
355 // writeback will be forwarded to next level.
356 incMissCount(pkt, id);
357 return false;
358 }
359 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
360 tags->insertBlock(pkt->getAddr(), blk, id);
361 blk->status = BlkValid | BlkReadable;
362 }
363 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
364 blk->status |= BlkDirty;
365 if (pkt->isSupplyExclusive()) {
366 blk->status |= BlkWritable;
367 }
368 // nothing else to do; writeback doesn't expect response
369 assert(!pkt->needsResponse());
370 incHitCount(pkt, id);
371 return true;
372 }
373
374 incMissCount(pkt, id);
375
376 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
377 // complete miss on store conditional... just give up now
378 pkt->req->setExtraData(0);
379 return true;
380 }
381
382 return false;
383 }
384
385
386 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
387 {
388 Packet::SenderState *prevSenderState;
389 int prevSrc;
390 #ifndef NDEBUG
391 BaseCache *cache;
392 #endif
393 public:
394 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
395 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
396 #ifndef NDEBUG
397 , cache(_cache)
398 #endif
399 {}
400 void restore(Packet *pkt, BaseCache *_cache)
401 {
402 assert(_cache == cache);
403 pkt->senderState = prevSenderState;
404 pkt->setDest(prevSrc);
405 }
406 };
407
408
409 template<class TagStore>
410 bool
411 Cache<TagStore>::timingAccess(PacketPtr pkt)
412 {
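// Timing-mode entry point from the cpu side. Responses forwarded from
// an upper-level cache are handled first, then requests that another
// cache has already inhibited, then uncacheable accesses; everything
// else goes through access() and, on a miss, is queued in an MSHR or
// the write buffer.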
413 //@todo Add back in MemDebug Calls
414 // MemDebug::cacheAccess(pkt);
415
416 // we charge hitLatency for doing just about anything here
417 Tick time = curTick() + hitLatency;
418
419 if (pkt->isResponse()) {
420 // must be cache-to-cache response from upper to lower level
421 ForwardResponseRecord *rec =
422 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
423
424 if (rec == NULL) {
425 assert(pkt->cmd == MemCmd::HardPFResp);
426 // Check if it's a prefetch response and handle it. We shouldn't
427 // get any other kinds of responses without FRRs.
428 DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
429 pkt->getAddr());
430 handleResponse(pkt);
431 return true;
432 }
433
434 rec->restore(pkt, this);
435 delete rec;
436 memSidePort->respond(pkt, time);
437 return true;
438 }
439
440 assert(pkt->isRequest());
441
442 if (pkt->memInhibitAsserted()) {
443 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
444 pkt->getAddr());
445 assert(!pkt->req->isUncacheable());
446 // Special tweak for multilevel coherence: snoop downward here
447 // on invalidates since there may be other caches below here
448 // that have shared copies. Not necessary if we know that
449 // supplier had exclusive copy to begin with.
450 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
451 Packet *snoopPkt = new Packet(pkt, true); // clear flags
452 snoopPkt->setExpressSnoop();
453 snoopPkt->assertMemInhibit();
454 memSidePort->sendTiming(snoopPkt);
455 // main memory will delete snoopPkt
456 }
457 // since we're the official target but we aren't responding,
458 // delete the packet now.
459 delete pkt;
460 return true;
461 }
462
463 if (pkt->req->isUncacheable()) {
464 if (pkt->req->isClearLL()) {
465 tags->clearLocks();
466 } else {
467 BlkType *blk = tags->findBlock(pkt->getAddr());
468 if (blk != NULL) {
469 tags->invalidateBlk(blk);
470 }
471 }
472
473 // writes go in write buffer, reads use MSHR
474 if (pkt->isWrite() && !pkt->isRead()) {
475 allocateWriteBuffer(pkt, time, true);
476 } else {
477 allocateUncachedReadBuffer(pkt, time, true);
478 }
479 assert(pkt->needsResponse()); // else we should delete it here??
480 return true;
481 }
482
483 int lat = hitLatency;
484 BlkType *blk = NULL;
485 PacketList writebacks;
486
487 bool satisfied = access(pkt, blk, lat, writebacks);
488
489 #if 0
490 /** @todo make the fast write alloc (wh64) work with coherence. */
491
492 // If this is a block size write/hint (WH64) allocate the block here
493 // if the coherence protocol allows it.
494 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
495 (pkt->cmd == MemCmd::WriteReq
496 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
497 // no outstanding misses, can do this
498 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
499 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
500 if (outstanding_miss) {
501 warn("WriteInv doing a fastallocate "
502 "with an outstanding miss to the same address\n");
503 }
504 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
505 writebacks);
506 ++fastWrites;
507 }
508 }
509 #endif
510
511 // track time of availability of next prefetch, if any
512 Tick next_pf_time = 0;
513
514 bool needsResponse = pkt->needsResponse();
515
516 if (satisfied) {
517 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
518 if (blk)
519 blk->status &= ~BlkHWPrefetched;
520 next_pf_time = prefetcher->notify(pkt, time);
521 }
522
523 if (needsResponse) {
524 pkt->makeTimingResponse();
525 cpuSidePort->respond(pkt, curTick()+lat);
526 } else {
527 delete pkt;
528 }
529 } else {
530 // miss
531
532 Addr blk_addr = blockAlign(pkt->getAddr());
533 MSHR *mshr = mshrQueue.findMatch(blk_addr);
534
535 if (mshr) {
536 // MSHR hit
537 //@todo remove hw_pf here
538 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
539 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
540 mshr->threadNum = -1;
541 }
542 mshr->allocateTarget(pkt, time, order++);
543 if (mshr->getNumTargets() == numTarget) {
544 noTargetMSHR = mshr;
545 setBlocked(Blocked_NoTargets);
546 // need to be careful with this... if this mshr isn't
547 // ready yet (i.e. time > curTick()), we don't want to
548 // move it ahead of mshrs that are ready
549 // mshrQueue.moveToFront(mshr);
550 }
551 } else {
552 // no MSHR
553 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
554 // always mark as cache fill for now... if we implement
555 // no-write-allocate or bypass accesses this will have to
556 // be changed.
557 if (pkt->cmd == MemCmd::Writeback) {
558 allocateWriteBuffer(pkt, time, true);
559 } else {
560 if (blk && blk->isValid()) {
561 // If we have a write miss to a valid block, we
562 // need to mark the block non-readable. Otherwise
563 // if we allow reads while there's an outstanding
564 // write miss, the read could return stale data
565 // out of the cache block... a more aggressive
566 // system could detect the overlap (if any) and
567 // forward data out of the MSHRs, but we don't do
568 // that yet. Note that we do need to leave the
569 // block valid so that it stays in the cache, in
570 // case we get an upgrade response (and hence no
571 // new data) when the write miss completes.
572 // As long as CPUs do proper store/load forwarding
573 // internally, and have a sufficiently weak memory
574 // model, this is probably unnecessary, but at some
575 // point it must have seemed like we needed it...
576 assert(pkt->needsExclusive() && !blk->isWritable());
577 blk->status &= ~BlkReadable;
578 }
579
580 allocateMissBuffer(pkt, time, true);
581 }
582
583 if (prefetcher) {
584 next_pf_time = prefetcher->notify(pkt, time);
585 }
586 }
587 }
588
589 if (next_pf_time != 0)
590 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
591
592 // copy writebacks to write buffer
593 while (!writebacks.empty()) {
594 PacketPtr wbPkt = writebacks.front();
595 allocateWriteBuffer(wbPkt, time, true);
596 writebacks.pop_front();
597 }
598
599 return true;
600 }
601
602
603 // See comment in cache.hh.
604 template<class TagStore>
605 PacketPtr
606 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
607 bool needsExclusive)
608 {
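// Returns NULL when the CPU-side packet should be forwarded unchanged
// (uncacheable accesses, and Writebacks or upgrades that miss here
// entirely); otherwise builds the memory-side packet needed to obtain
// the block: an Upgrade if we already hold a shared copy, or a
// ReadReq/ReadExReq if we hold nothing.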
609 bool blkValid = blk && blk->isValid();
610
611 if (cpu_pkt->req->isUncacheable()) {
612 //assert(blk == NULL);
613 return NULL;
614 }
615
616 if (!blkValid &&
617 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
618 // Writebacks that weren't allocated in access() and upgrades
619 // from upper-level caches that missed completely just go
620 // through.
621 return NULL;
622 }
623
624 assert(cpu_pkt->needsResponse());
625
626 MemCmd cmd;
627 // @TODO make useUpgrades a parameter.
628 // Note that ownership protocols require upgrade, otherwise a
629 // write miss on a shared owned block will generate a ReadExcl,
630 // which will clobber the owned copy.
631 const bool useUpgrades = true;
632 if (blkValid && useUpgrades) {
633 // only reason to be here is that blk is shared
634 // (read-only) and we need exclusive
635 assert(needsExclusive && !blk->isWritable());
636 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
637 } else {
638 // block is invalid
639 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
640 }
641 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
642
643 pkt->allocate();
644 return pkt;
645 }
646
647
648 template<class TagStore>
649 Tick
650 Cache<TagStore>::atomicAccess(PacketPtr pkt)
651 {
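// Atomic-mode accesses are serviced inline: a miss sends the bus
// packet with sendAtomic() and fills the block before returning, and
// the accumulated latency is the return value. No MSHRs or events are
// involved, so writebacks are also sent atomically at the end.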
652 int lat = hitLatency;
653
654 // @TODO: make this a parameter
655 bool last_level_cache = false;
656
657 if (pkt->memInhibitAsserted()) {
658 assert(!pkt->req->isUncacheable());
659 // have to invalidate ourselves and any lower caches even if
660 // upper cache will be responding
661 if (pkt->isInvalidate()) {
662 BlkType *blk = tags->findBlock(pkt->getAddr());
663 if (blk && blk->isValid()) {
664 tags->invalidateBlk(blk);
665 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
666 pkt->cmdString(), pkt->getAddr());
667 }
668 if (!last_level_cache) {
669 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
670 pkt->cmdString(), pkt->getAddr());
671 lat += memSidePort->sendAtomic(pkt);
672 }
673 } else {
674 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
675 pkt->cmdString(), pkt->getAddr());
676 }
677
678 return lat;
679 }
680
681 // should assert here that there are no outstanding MSHRs or
682 // writebacks... that would mean that someone used an atomic
683 // access in timing mode
684
685 BlkType *blk = NULL;
686 PacketList writebacks;
687
688 if (!access(pkt, blk, lat, writebacks)) {
689 // MISS
690 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
691
692 bool is_forward = (bus_pkt == NULL);
693
694 if (is_forward) {
695 // just forwarding the same request to the next level
696 // no local cache operation involved
697 bus_pkt = pkt;
698 }
699
700 DPRINTF(Cache, "Sending an atomic %s for %x\n",
701 bus_pkt->cmdString(), bus_pkt->getAddr());
702
703 #if TRACING_ON
704 CacheBlk::State old_state = blk ? blk->status : 0;
705 #endif
706
707 lat += memSidePort->sendAtomic(bus_pkt);
708
709 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
710 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
711
712 assert(!bus_pkt->wasNacked());
713
714 // If packet was a forward, the response (if any) is already
715 // in place in the bus_pkt == pkt structure, so we don't need
716 // to do anything. Otherwise, use the separate bus_pkt to
717 // generate response to pkt and then delete it.
718 if (!is_forward) {
719 if (pkt->needsResponse()) {
720 assert(bus_pkt->isResponse());
721 if (bus_pkt->isError()) {
722 pkt->makeAtomicResponse();
723 pkt->copyError(bus_pkt);
724 } else if (bus_pkt->isRead() ||
725 bus_pkt->cmd == MemCmd::UpgradeResp) {
726 // we're updating cache state to allow us to
727 // satisfy the upstream request from the cache
728 blk = handleFill(bus_pkt, blk, writebacks);
729 satisfyCpuSideRequest(pkt, blk);
730 } else {
731 // we're satisfying the upstream request without
732 // modifying cache state, e.g., a write-through
733 pkt->makeAtomicResponse();
734 }
735 }
736 delete bus_pkt;
737 }
738 }
739
740 // Note that we don't invoke the prefetcher at all in atomic mode.
741 // It's not clear how to do it properly, particularly for
742 // prefetchers that aggressively generate prefetch candidates and
743 // rely on bandwidth contention to throttle them; these will tend
744 // to pollute the cache in atomic mode since there is no bandwidth
745 // contention. If we ever do want to enable prefetching in atomic
746 // mode, though, this is the place to do it... see timingAccess()
747 // for an example (though we'd want to issue the prefetch(es)
748 // immediately rather than calling requestMemSideBus() as we do
749 // there).
750
751 // Handle writebacks if needed
752 while (!writebacks.empty()){
753 PacketPtr wbPkt = writebacks.front();
754 memSidePort->sendAtomic(wbPkt);
755 writebacks.pop_front();
756 delete wbPkt;
757 }
758
759 // We now have the block one way or another (hit or completed miss)
760
761 if (pkt->needsResponse()) {
762 pkt->makeAtomicResponse();
763 }
764
765 return lat;
766 }
767
768
769 template<class TagStore>
770 void
771 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
772 {
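// Functional accesses check every place the line's data may live: the
// tag array, the MSHR queue, the write buffer, and the packet queues
// of both ports. The fromCpuSide flag tells us which side the access
// entered on, so an unsatisfied access keeps moving in the same
// direction: toward memory for CPU-side accesses, or toward the CPU
// (only if we forward snoops) for memory-side ones.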
773 Addr blk_addr = blockAlign(pkt->getAddr());
774 BlkType *blk = tags->findBlock(pkt->getAddr());
775 MSHR *mshr = mshrQueue.findMatch(blk_addr);
776
777 pkt->pushLabel(name());
778
779 CacheBlkPrintWrapper cbpw(blk);
780
781 // Note that just because an L2/L3 has valid data doesn't mean an
782 // L1 doesn't have a more up-to-date modified copy that still
783 // needs to be found. As a result we always update the request if
784 // we have it, but only declare it satisfied if we are the owner.
785
786 // see if we have data at all (owned or otherwise)
787 bool have_data = blk && blk->isValid()
788 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
789
790 // data we have is dirty if marked as such or if valid & ownership
791 // pending due to outstanding UpgradeReq
792 bool have_dirty =
793 have_data && (blk->isDirty() ||
794 (mshr && mshr->inService && mshr->isPendingDirty()));
795
796 bool done = have_dirty
797 || cpuSidePort->checkFunctional(pkt)
798 || mshrQueue.checkFunctional(pkt, blk_addr)
799 || writeBuffer.checkFunctional(pkt, blk_addr)
800 || memSidePort->checkFunctional(pkt);
801
802 DPRINTF(Cache, "functional %s %x %s%s%s\n",
803 pkt->cmdString(), pkt->getAddr(),
804 (blk && blk->isValid()) ? "valid " : "",
805 have_data ? "data " : "", done ? "done " : "");
806
807 // We're leaving the cache, so pop cache->name() label
808 pkt->popLabel();
809
810 if (done) {
811 pkt->makeResponse();
812 } else {
813 // if it came as a request from the CPU side then make sure it
814 // continues towards the memory side
815 if (fromCpuSide) {
816 memSidePort->sendFunctional(pkt);
817 } else if (forwardSnoops) {
818 // if it came from the memory side, it must be a snoop request
819 // and we should only forward it if we are forwarding snoops
820 cpuSidePort->sendFunctional(pkt);
821 }
822 }
823 }
824
825
826 /////////////////////////////////////////////////////
827 //
828 // Response handling: responses from the memory side
829 //
830 /////////////////////////////////////////////////////
831
832
833 template<class TagStore>
834 void
835 Cache<TagStore>::handleResponse(PacketPtr pkt)
836 {
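// Handle a response from the memory side: fill the block if this is a
// cache fill, then service each MSHR target in order (CPU requests,
// prefetches, deferred snoops), and finally either deallocate the MSHR
// or re-request the bus if deferred targets were promoted.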
837 Tick time = curTick() + hitLatency;
838 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
839 bool is_error = pkt->isError();
840
841 assert(mshr);
842
843 if (pkt->wasNacked()) {
844 //pkt->reinitFromRequest();
845 warn("NACKs from devices not connected to the same bus "
846 "not implemented\n");
847 return;
848 }
849 if (is_error) {
850 DPRINTF(Cache, "Cache received packet with error for address %x, "
851 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
852 }
853
854 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
855
856 MSHRQueue *mq = mshr->queue;
857 bool wasFull = mq->isFull();
858
859 if (mshr == noTargetMSHR) {
860 // we always clear at least one target
861 clearBlocked(Blocked_NoTargets);
862 noTargetMSHR = NULL;
863 }
864
865 // Initial target is used just for stats
866 MSHR::Target *initial_tgt = mshr->getTarget();
867 BlkType *blk = tags->findBlock(pkt->getAddr());
868 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
869 Tick miss_latency = curTick() - initial_tgt->recvTime;
870 PacketList writebacks;
871
872 if (pkt->req->isUncacheable()) {
873 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
874 miss_latency;
875 } else {
876 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
877 miss_latency;
878 }
879
880 bool is_fill = !mshr->isForward &&
881 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
882
883 if (is_fill && !is_error) {
884 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
885 pkt->getAddr());
886
887 // give mshr a chance to do some dirty work
888 mshr->handleFill(pkt, blk);
889
890 blk = handleFill(pkt, blk, writebacks);
891 assert(blk != NULL);
892 }
893
894 // First offset for critical word first calculations
895 int initial_offset = 0;
896
897 if (mshr->hasTargets()) {
898 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
899 }
900
901 while (mshr->hasTargets()) {
902 MSHR::Target *target = mshr->getTarget();
903
904 switch (target->source) {
905 case MSHR::Target::FromCPU:
906 Tick completion_time;
907 if (is_fill) {
908 satisfyCpuSideRequest(target->pkt, blk,
909 true, mshr->hasPostDowngrade());
910 // How many bytes past the first request is this one
911 int transfer_offset =
912 target->pkt->getOffset(blkSize) - initial_offset;
913 if (transfer_offset < 0) {
914 transfer_offset += blkSize;
915 }
916
917 // If critical word (no offset) return first word time
918 completion_time = tags->getHitLatency() +
919 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
920
921 assert(!target->pkt->req->isUncacheable());
922 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
923 completion_time - target->recvTime;
924 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
925 // failed StoreCond upgrade
926 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
927 target->pkt->cmd == MemCmd::StoreCondFailReq ||
928 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
929 completion_time = tags->getHitLatency() + pkt->finishTime;
930 target->pkt->req->setExtraData(0);
931 } else {
932 // not a cache fill, just forwarding response
933 completion_time = tags->getHitLatency() + pkt->finishTime;
934 if (pkt->isRead() && !is_error) {
935 target->pkt->setData(pkt->getPtr<uint8_t>());
936 }
937 }
938 target->pkt->makeTimingResponse();
939 // if this packet is an error, copy that to the new packet
940 if (is_error)
941 target->pkt->copyError(pkt);
942 if (target->pkt->cmd == MemCmd::ReadResp &&
943 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
944 // If intermediate cache got ReadRespWithInvalidate,
945 // propagate that. Response should not have
946 // isInvalidate() set otherwise.
947 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
948 }
949 cpuSidePort->respond(target->pkt, completion_time);
950 break;
951
952 case MSHR::Target::FromPrefetcher:
953 assert(target->pkt->cmd == MemCmd::HardPFReq);
954 if (blk)
955 blk->status |= BlkHWPrefetched;
956 delete target->pkt->req;
957 delete target->pkt;
958 break;
959
960 case MSHR::Target::FromSnoop:
961 // I don't believe that a snoop can be in an error state
962 assert(!is_error);
963 // response to snoop request
964 DPRINTF(Cache, "processing deferred snoop...\n");
965 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
966 handleSnoop(target->pkt, blk, true, true,
967 mshr->hasPostInvalidate());
968 break;
969
970 default:
971 panic("Illegal target->source enum %d\n", target->source);
972 }
973
974 mshr->popTarget();
975 }
976
977 if (blk) {
978 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
979 tags->invalidateBlk(blk);
980 } else if (mshr->hasPostDowngrade()) {
981 blk->status &= ~BlkWritable;
982 }
983 }
984
985 if (mshr->promoteDeferredTargets()) {
986 // avoid later read getting stale data while write miss is
987 // outstanding... see comment in timingAccess()
988 if (blk) {
989 blk->status &= ~BlkReadable;
990 }
991 MSHRQueue *mq = mshr->queue;
992 mq->markPending(mshr);
993 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
994 } else {
995 mq->deallocate(mshr);
996 if (wasFull && !mq->isFull()) {
997 clearBlocked((BlockedCause)mq->index);
998 }
999 }
1000
1001 // copy writebacks to write buffer
1002 while (!writebacks.empty()) {
1003 PacketPtr wbPkt = writebacks.front();
1004 allocateWriteBuffer(wbPkt, time, true);
1005 writebacks.pop_front();
1006 }
1007 // if we used temp block, clear it out
1008 if (blk == tempBlock) {
1009 if (blk->isDirty()) {
1010 allocateWriteBuffer(writebackBlk(blk), time, true);
1011 }
1012 tags->invalidateBlk(blk);
1013 }
1014
1015 delete pkt;
1016 }
1017
1018
1019
1020
1021 template<class TagStore>
1022 PacketPtr
1023 Cache<TagStore>::writebackBlk(BlkType *blk)
1024 {
1025 assert(blk && blk->isValid() && blk->isDirty());
1026
1027 writebacks[0/*pkt->req->threadId()*/]++;
1028
1029 Request *writebackReq =
1030 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
1031 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
1032 if (blk->isWritable()) {
1033 writeback->setSupplyExclusive();
1034 }
1035 writeback->allocate();
1036 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1037
1038 blk->status &= ~BlkDirty;
1039 return writeback;
1040 }
1041
1042
1043 template<class TagStore>
1044 typename Cache<TagStore>::BlkType*
1045 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1046 {
1047 BlkType *blk = tags->findVictim(addr, writebacks);
1048
1049 if (blk->isValid()) {
1050 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1051 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1052 if (repl_mshr) {
1053 // must be an outstanding upgrade request on block
1054 // we're about to replace...
1055 assert(!blk->isWritable());
1056 assert(repl_mshr->needsExclusive());
1057 // too hard to replace block with transient state
1058 // allocation failed, block not inserted
1059 return NULL;
1060 } else {
1061 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1062 repl_addr, addr,
1063 blk->isDirty() ? "writeback" : "clean");
1064
1065 if (blk->isDirty()) {
1066 // Save writeback packet for handling by caller
1067 writebacks.push_back(writebackBlk(blk));
1068 }
1069 }
1070 }
1071
1072 return blk;
1073 }
1074
1075
1076 // Note that the reason we return a list of writebacks rather than
1077 // inserting them directly in the write buffer is that this function
1078 // is called by both atomic and timing-mode accesses, and in atomic
1079 // mode we don't mess with the write buffer (we just perform the
1080 // writebacks atomically once the original request is complete).
1081 template<class TagStore>
1082 typename Cache<TagStore>::BlkType*
1083 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1084 PacketList &writebacks)
1085 {
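// Place the response data in the cache: allocate a victim (falling
// back to the standalone tempBlock when no block can be replaced),
// mark the block valid and readable, and grant writable/dirty state
// depending on whether the shared line or mem-inhibit was asserted on
// the response.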
1086 Addr addr = pkt->getAddr();
1087 #if TRACING_ON
1088 CacheBlk::State old_state = blk ? blk->status : 0;
1089 #endif
1090
1091 if (blk == NULL) {
1092 // better have read new data...
1093 assert(pkt->hasData());
1094 // need to do a replacement
1095 blk = allocateBlock(addr, writebacks);
1096 if (blk == NULL) {
1097 // No replaceable block... just use temporary storage to
1098 // complete the current request and then get rid of it
1099 assert(!tempBlock->isValid());
1100 blk = tempBlock;
1101 tempBlock->set = tags->extractSet(addr);
1102 tempBlock->tag = tags->extractTag(addr);
1103 DPRINTF(Cache, "using temp block for %x\n", addr);
1104 } else {
1105 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
1106 tags->insertBlock(pkt->getAddr(), blk, id);
1107 }
1108
1109 // starting from scratch with a new block
1110 blk->status = 0;
1111 } else {
1112 // existing block... probably an upgrade
1113 assert(blk->tag == tags->extractTag(addr));
1114 // either we're getting new data or the block should already be valid
1115 assert(pkt->hasData() || blk->isValid());
1116 // don't clear block status... if block is already dirty we
1117 // don't want to lose that
1118 }
1119
1120 blk->status |= BlkValid | BlkReadable;
1121
1122 if (!pkt->sharedAsserted()) {
1123 blk->status |= BlkWritable;
1124 // If we got this via cache-to-cache transfer (i.e., from a
1125 // cache that was an owner) and took away that owner's copy,
1126 // then we need to write it back. Normally this happens
1127 // anyway as a side effect of getting a copy to write it, but
1128 // there are cases (such as failed store conditionals or
1129 // compare-and-swaps) where we'll demand an exclusive copy but
1130 // end up not writing it.
1131 if (pkt->memInhibitAsserted())
1132 blk->status |= BlkDirty;
1133 }
1134
1135 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1136 addr, old_state, blk->status);
1137
1138 // if we got new data, copy it in
1139 if (pkt->isRead()) {
1140 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1141 }
1142
1143 blk->whenReady = pkt->finishTime;
1144
1145 return blk;
1146 }
1147
1148
1149 /////////////////////////////////////////////////////
1150 //
1151 // Snoop path: requests coming in from the memory side
1152 //
1153 /////////////////////////////////////////////////////
1154
1155 template<class TagStore>
1156 void
1157 Cache<TagStore>::
1158 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1159 bool already_copied, bool pending_inval)
1160 {
1161 // timing-mode snoop responses require a new packet, unless we
1162 // already made a copy...
1163 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1164 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1165 pkt->allocate();
1166 pkt->makeTimingResponse();
1167 if (pkt->isRead()) {
1168 pkt->setDataFromBlock(blk_data, blkSize);
1169 }
1170 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1171 // Assume we defer a response to a read from a far-away cache
1172 // A, then later defer a ReadExcl from a cache B on the same
1173 // bus as us. We'll assert MemInhibit in both cases, but in
1174 // the latter case MemInhibit will keep the invalidation from
1175 // reaching cache A. This special response tells cache A that
1176 // it gets the block to satisfy its read, but must immediately
1177 // invalidate it.
1178 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1179 }
1180 memSidePort->respond(pkt, curTick() + hitLatency);
1181 }
1182
1183 template<class TagStore>
1184 void
1185 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1186 bool is_timing, bool is_deferred,
1187 bool pending_inval)
1188 {
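// Snoops are first forwarded upward (as express snoops in timing
// mode) so caches above us can respond or downgrade; then we update
// our own state: respond with data if we hold the block dirty, drop
// writability on read snoops, and invalidate at the very end if the
// snoop requires it.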
1189 // deferred snoops can only happen in timing mode
1190 assert(!(is_deferred && !is_timing));
1191 // pending_inval only makes sense on deferred snoops
1192 assert(!(pending_inval && !is_deferred));
1193 assert(pkt->isRequest());
1194
1195 // the packet may get modified if we or a forwarded snooper
1196 // responds in atomic mode, so remember a few things about the
1197 // original packet up front
1198 bool invalidate = pkt->isInvalidate();
1199 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1200
1201 if (forwardSnoops) {
1202 // first propagate snoop upward to see if anyone above us wants to
1203 // handle it. save & restore packet src since it will get
1204 // rewritten to be relative to cpu-side bus (if any)
1205 bool alreadyResponded = pkt->memInhibitAsserted();
1206 if (is_timing) {
1207 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1208 snoopPkt->setExpressSnoop();
1209 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1210 cpuSidePort->sendTiming(snoopPkt);
1211 if (snoopPkt->memInhibitAsserted()) {
1212 // cache-to-cache response from some upper cache
1213 assert(!alreadyResponded);
1214 pkt->assertMemInhibit();
1215 } else {
1216 delete snoopPkt->senderState;
1217 }
1218 if (snoopPkt->sharedAsserted()) {
1219 pkt->assertShared();
1220 }
1221 delete snoopPkt;
1222 } else {
1223 int origSrc = pkt->getSrc();
1224 cpuSidePort->sendAtomic(pkt);
1225 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1226 // cache-to-cache response from some upper cache:
1227 // forward response to original requester
1228 assert(pkt->isResponse());
1229 }
1230 pkt->setSrc(origSrc);
1231 }
1232 }
1233
1234 if (!blk || !blk->isValid()) {
1235 return;
1236 }
1237
1238 // we may end up modifying both the block state and the packet (if
1239 // we respond in atomic mode), so just figure out what to do now
1240 // and then do it later
1241 bool respond = blk->isDirty() && pkt->needsResponse();
1242 bool have_exclusive = blk->isWritable();
1243
1244 if (pkt->isRead() && !invalidate) {
1245 assert(!needs_exclusive);
1246 pkt->assertShared();
1247 int bits_to_clear = BlkWritable;
1248 const bool haveOwnershipState = true; // for now
1249 if (!haveOwnershipState) {
1250 // if we don't support pure ownership (dirty && !writable),
1251 // have to clear dirty bit here, assume memory snarfs data
1252 // on cache-to-cache xfer
1253 bits_to_clear |= BlkDirty;
1254 }
1255 blk->status &= ~bits_to_clear;
1256 }
1257
1258 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1259 pkt->cmdString(), blockAlign(pkt->getAddr()),
1260 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1261
1262 if (respond) {
1263 assert(!pkt->memInhibitAsserted());
1264 pkt->assertMemInhibit();
1265 if (have_exclusive) {
1266 pkt->setSupplyExclusive();
1267 }
1268 if (is_timing) {
1269 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1270 } else {
1271 pkt->makeAtomicResponse();
1272 pkt->setDataFromBlock(blk->data, blkSize);
1273 }
1274 } else if (is_timing && is_deferred) {
1275 // if it's a deferred timing snoop then we've made a copy of
1276 // the packet, and so if we're not using that copy to respond
1277 // then we need to delete it here.
1278 delete pkt;
1279 }
1280
1281 // Do this last in case it deallocates block data or something
1282 // like that
1283 if (invalidate) {
1284 tags->invalidateBlk(blk);
1285 }
1286 }
1287
1288
1289 template<class TagStore>
1290 void
1291 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1292 {
1293 // Note that some deferred snoops don't have requests, since the
1294 // original access may have already completed
1295 if ((pkt->req && pkt->req->isUncacheable()) ||
1296 pkt->cmd == MemCmd::Writeback) {
1297 //Can't get a hit on an uncacheable address
1298 //Revisit this for multi level coherence
1299 return;
1300 }
1301
1302 BlkType *blk = tags->findBlock(pkt->getAddr());
1303
1304 Addr blk_addr = blockAlign(pkt->getAddr());
1305 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1306
1307 // Let the MSHR itself track the snoop and decide whether we want
1308 // to go ahead and do the regular cache snoop
1309 if (mshr && mshr->handleSnoop(pkt, order++)) {
1310 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1311 blk_addr);
1312 if (mshr->getNumTargets() > numTarget)
1313 warn("allocating bonus target for snoop"); //handle later
1314 return;
1315 }
1316
1317 //We also need to check the writeback buffers and handle those
1318 std::vector<MSHR *> writebacks;
1319 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1320 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1321 pkt->getAddr());
1322
1323 //Look through writebacks for any non-uncacheable writes, use that
1324 for (int i = 0; i < writebacks.size(); i++) {
1325 mshr = writebacks[i];
1326 assert(!mshr->isUncacheable());
1327 assert(mshr->getNumTargets() == 1);
1328 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1329 assert(wb_pkt->cmd == MemCmd::Writeback);
1330
1331 assert(!pkt->memInhibitAsserted());
1332 pkt->assertMemInhibit();
1333 if (!pkt->needsExclusive()) {
1334 pkt->assertShared();
1335 // the writeback is no longer the exclusive copy in the system
1336 wb_pkt->clearSupplyExclusive();
1337 } else {
1338 // if we're not asserting the shared line, we need to
1339 // invalidate our copy. we'll do that below as long as
1340 // the packet's invalidate flag is set...
1341 assert(pkt->isInvalidate());
1342 }
1343 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1344 false, false);
1345
1346 if (pkt->isInvalidate()) {
1347 // Invalidation trumps our writeback... discard here
1348 markInService(mshr);
1349 delete wb_pkt;
1350 }
1351
1352 // If this was a shared writeback, there may still be
1353 // other shared copies above that require invalidation.
1354 // We could be more selective and return here if the
1355 // request is non-exclusive or if the writeback is
1356 // exclusive.
1357 break;
1358 }
1359 }
1360
1361 handleSnoop(pkt, blk, true, false, false);
1362 }
1363
1364
1365 template<class TagStore>
1366 Tick
1367 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1368 {
1369 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1370 // Can't get a hit on an uncacheable address
1371 // Revisit this for multi level coherence
1372 return hitLatency;
1373 }
1374
1375 BlkType *blk = tags->findBlock(pkt->getAddr());
1376 handleSnoop(pkt, blk, false, false, false);
1377 return hitLatency;
1378 }
1379
1380
1381 template<class TagStore>
1382 MSHR *
1383 Cache<TagStore>::getNextMSHR()
1384 {
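// Arbitrate between the miss queue and the write buffer: misses are
// normally preferred unless the write buffer is full, entries that
// conflict on the same address are serviced ahead of the request that
// would otherwise go next, and if neither queue has anything ready we
// ask the prefetcher for a candidate.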
1385 // Check both MSHR queue and write buffer for potential requests
1386 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1387 MSHR *write_mshr = writeBuffer.getNextMSHR();
1388
1389 // Now figure out which one to send... some cases are easy
1390 if (miss_mshr && !write_mshr) {
1391 return miss_mshr;
1392 }
1393 if (write_mshr && !miss_mshr) {
1394 return write_mshr;
1395 }
1396
1397 if (miss_mshr && write_mshr) {
1398 // We have one of each... normally we favor the miss request
1399 // unless the write buffer is full
1400 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1401 // Write buffer is full, so we'd like to issue a write;
1402 // need to search MSHR queue for conflicting earlier miss.
1403 MSHR *conflict_mshr =
1404 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1405
1406 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1407 // Service misses in order until conflict is cleared.
1408 return conflict_mshr;
1409 }
1410
1411 // No conflicts; issue write
1412 return write_mshr;
1413 }
1414
1415 // Write buffer isn't full, but need to check it for
1416 // conflicting earlier writeback
1417 MSHR *conflict_mshr =
1418 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1419 if (conflict_mshr) {
1420 // not sure why we don't check order here... it was in the
1421 // original code but commented out.
1422
1423 // The only way this happens is if we are
1424 // doing a write and we didn't have permissions,
1425 // then subsequently saw a writeback (the owned copy got evicted).
1426 // We need to make sure to perform the writeback first
1427 // to preserve the dirty data; then we can issue the write.
1428
1429 // should we return write_mshr here instead? I.e. do we
1430 // have to flush writes in order? I don't think so... not
1431 // for Alpha anyway. Maybe for x86?
1432 return conflict_mshr;
1433 }
1434
1435 // No conflicts; issue read
1436 return miss_mshr;
1437 }
1438
1439 // fall through... no pending requests. Try a prefetch.
1440 assert(!miss_mshr && !write_mshr);
1441 if (prefetcher && !mshrQueue.isFull()) {
1442 // If we have a miss queue slot, we can try a prefetch
1443 PacketPtr pkt = prefetcher->getPacket();
1444 if (pkt) {
1445 Addr pf_addr = blockAlign(pkt->getAddr());
1446 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1447 !writeBuffer.findMatch(pf_addr)) {
1448 // Update statistic on number of prefetches issued
1449 // (hwpf_mshr_misses)
1450 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1451 // Don't request bus, since we already have it
1452 return allocateMissBuffer(pkt, curTick(), false);
1453 } else {
1454 // free the request and packet
1455 delete pkt->req;
1456 delete pkt;
1457 }
1458 }
1459 }
1460
1461 return NULL;
1462 }
1463
1464
1465 template<class TagStore>
1466 PacketPtr
1467 Cache<TagStore>::getTimingPacket()
1468 {
1469 MSHR *mshr = getNextMSHR();
1470
1471 if (mshr == NULL) {
1472 return NULL;
1473 }
1474
1475 // use request from 1st target
1476 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1477 PacketPtr pkt = NULL;
1478
1479 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1480 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1481 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1482 // in MSHR, so now that we are getting around to processing
1483 // it, just treat it as if we got a failure response
1484 pkt = new Packet(tgt_pkt);
1485 pkt->cmd = MemCmd::UpgradeFailResp;
1486 pkt->senderState = mshr;
1487 pkt->firstWordTime = pkt->finishTime = curTick();
1488 handleResponse(pkt);
1489 return NULL;
1490 } else if (mshr->isForwardNoResponse()) {
1491 // no response expected, just forward packet as it is
1492 assert(tags->findBlock(mshr->addr) == NULL);
1493 pkt = tgt_pkt;
1494 } else {
1495 BlkType *blk = tags->findBlock(mshr->addr);
1496
1497 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1498 // It might be possible for a writeback to arrive between
1499 // the time the prefetch is placed in the MSHRs and when
1500 // it's selected to send... if so, this assert will catch
1501 // that, and then we'll have to figure out what to do.
1502 assert(blk == NULL);
1503
1504 // We need to check the caches above us to verify that they don't have
1505 // a copy of this block in the dirty state at the moment. Without this
1506 // check we could get a stale copy from memory that might get used
1507 // in place of the dirty one.
1508 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1509 snoop_pkt->setExpressSnoop();
1510 snoop_pkt->senderState = mshr;
1511 cpuSidePort->sendTiming(snoop_pkt);
1512
1513 if (snoop_pkt->memInhibitAsserted()) {
1514 markInService(mshr, snoop_pkt);
1515 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1516 tgt_pkt->getAddr());
1517 delete snoop_pkt;
1518 return NULL;
1519 }
1520 delete snoop_pkt;
1521 }
1522
1523 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1524
1525 mshr->isForward = (pkt == NULL);
1526
1527 if (mshr->isForward) {
1528 // not a cache block request, but a response is expected
1529 // make copy of current packet to forward, keep current
1530 // copy for response handling
1531 pkt = new Packet(tgt_pkt);
1532 pkt->allocate();
1533 if (pkt->isWrite()) {
1534 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1535 }
1536 }
1537 }
1538
1539 assert(pkt != NULL);
1540 pkt->senderState = mshr;
1541 return pkt;
1542 }
1543
1544
1545 template<class TagStore>
1546 Tick
1547 Cache<TagStore>::nextMSHRReadyTime()
1548 {
1549 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1550 writeBuffer.nextMSHRReadyTime());
1551
1552 if (prefetcher) {
1553 nextReady = std::min(nextReady,
1554 prefetcher->nextPrefetchReadyTime());
1555 }
1556
1557 return nextReady;
1558 }
1559
1560
1561 ///////////////
1562 //
1563 // CpuSidePort
1564 //
1565 ///////////////
1566
1567 template<class TagStore>
1568 void
1569 Cache<TagStore>::CpuSidePort::
1570 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1571 {
1572 // CPU side port doesn't snoop; it's a target only. It can
1573 // potentially respond to any address.
1574 snoop = false;
1575 resp.push_back(myCache()->getAddrRange());
1576 }
1577
1578
1579 template<class TagStore>
1580 bool
1581 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1582 {
1583 // illegal to block responses... can lead to deadlock
1584 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1585 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1586 mustSendRetry = true;
1587 return false;
1588 }
1589
1590 myCache()->timingAccess(pkt);
1591 return true;
1592 }
1593
1594
1595 template<class TagStore>
1596 Tick
1597 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1598 {
1599 return myCache()->atomicAccess(pkt);
1600 }
1601
1602
1603 template<class TagStore>
1604 void
1605 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1606 {
1607 myCache()->functionalAccess(pkt, true);
1608 }
1609
1610
1611 template<class TagStore>
1612 Cache<TagStore>::
1613 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1614 const std::string &_label)
1615 : BaseCache::CachePort(_name, _cache, _label)
1616 {
1617 }
1618
1619 ///////////////
1620 //
1621 // MemSidePort
1622 //
1623 ///////////////
1624
1625 template<class TagStore>
1626 void
1627 Cache<TagStore>::MemSidePort::
1628 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1629 {
1630 // Memory-side port always snoops, but never passes requests
1631 // through to targets on the cpu side (so we don't add anything to
1632 // the address range list).
1633 snoop = true;
1634 }
1635
1636
1637 template<class TagStore>
1638 bool
1639 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1640 {
1641 // this needs to be fixed so that the cache updates the mshr and sends the
1642 // packet back out on the link, but that case probably won't happen, so
1643 // until this gets fixed, just panic when it does
1644 if (pkt->wasNacked())
1645 panic("Need to implement cache resending nacked packets!\n");
1646
1647 if (pkt->isRequest() && blocked) {
1648 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1649 mustSendRetry = true;
1650 return false;
1651 }
1652
1653 if (pkt->isResponse()) {
1654 myCache()->handleResponse(pkt);
1655 } else {
1656 myCache()->snoopTiming(pkt);
1657 }
1658 return true;
1659 }
1660
1661
1662 template<class TagStore>
1663 Tick
1664 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1665 {
1666 // in atomic mode, responses go back to the sender via the
1667 // function return from sendAtomic(), not via a separate
1668 // sendAtomic() from the responder. Thus we should never see a
1669 // response packet in recvAtomic() (anywhere, not just here).
1670 assert(!pkt->isResponse());
1671 return myCache()->snoopAtomic(pkt);
1672 }
1673
1674
1675 template<class TagStore>
1676 void
1677 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1678 {
1679 myCache()->functionalAccess(pkt, false);
1680 }
1681
1682
1683
1684 template<class TagStore>
1685 void
1686 Cache<TagStore>::MemSidePort::sendPacket()
1687 {
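// Ready responses on the deferred list take priority; otherwise ask
// the cache for the next request or writeback via getTimingPacket().
// After a successful send (or when there is nothing to send) the send
// event is rescheduled based on the earliest deferred-packet or MSHR
// ready time.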
1688 // if we have responses that are ready, they take precedence
1689 if (deferredPacketReady()) {
1690 bool success = sendTiming(transmitList.front().pkt);
1691
1692 if (success) {
1693 //send successful, remove packet
1694 transmitList.pop_front();
1695 }
1696
1697 waitingOnRetry = !success;
1698 } else {
1699 // check for non-response packets (requests & writebacks)
1700 PacketPtr pkt = myCache()->getTimingPacket();
1701 if (pkt == NULL) {
1702 // can happen if e.g. we attempt a writeback and fail, but
1703 // before the retry, the writeback is eliminated because
1704 // we snoop another cache's ReadEx.
1705 waitingOnRetry = false;
1706 } else {
1707 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1708
1709 bool success = sendTiming(pkt);
1710
1711 waitingOnRetry = !success;
1712 if (waitingOnRetry) {
1713 DPRINTF(CachePort, "now waiting on a retry\n");
1714 if (!mshr->isForwardNoResponse()) {
1715 delete pkt;
1716 }
1717 } else {
1718 myCache()->markInService(mshr, pkt);
1719 }
1720 }
1721 }
1722
1723
1724 // tried to send packet... if it was successful (no retry), see if
1725 // we need to rerequest bus or not
1726 if (!waitingOnRetry) {
1727 Tick nextReady = std::min(deferredPacketReadyTime(),
1728 myCache()->nextMSHRReadyTime());
1729 // @TODO: need to factor in prefetch requests here somehow
1730 if (nextReady != MaxTick) {
1731 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1732 schedule(sendEvent, std::max(nextReady, curTick() + 1));
1733 } else {
1734 // no more to send right now: if we're draining, we may be done
1735 if (drainEvent && !sendEvent->scheduled()) {
1736 drainEvent->process();
1737 drainEvent = NULL;
1738 }
1739 }
1740 }
1741 }
1742
1743 template<class TagStore>
1744 void
1745 Cache<TagStore>::MemSidePort::recvRetry()
1746 {
1747 assert(waitingOnRetry);
1748 sendPacket();
1749 }
1750
1751
1752 template<class TagStore>
1753 void
1754 Cache<TagStore>::MemSidePort::processSendEvent()
1755 {
1756 assert(!waitingOnRetry);
1757 sendPacket();
1758 }
1759
1760
1761 template<class TagStore>
1762 Cache<TagStore>::
1763 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1764 const std::string &_label)
1765 : BaseCache::CachePort(_name, _cache, _label)
1766 {
1767 // override default send event from SimpleTimingPort
1768 delete sendEvent;
1769 sendEvent = new SendEvent(this);
1770 }