1 /*
2 * Copyright (c) 2010 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "mem/cache/blk.hh"
58 #include "mem/cache/cache.hh"
59 #include "mem/cache/mshr.hh"
60 #include "mem/cache/prefetch/base.hh"
61 #include "sim/sim_exit.hh"
62
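// Everything below is templated on the tag store type. A concrete cache is
// created elsewhere in gem5 by instantiating the template with a real tag
// store; a minimal sketch (assuming the LRU tag store from
// mem/cache/tags/lru.hh -- the instantiation is not done in this file) would
// look like:
//
//     #include "mem/cache/tags/lru.hh"
//     template class Cache<LRU>;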
63 template<class TagStore>
64 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
65 : BaseCache(p),
66 tags(tags),
67 prefetcher(pf),
68 doFastWrites(true),
69 prefetchOnAccess(p->prefetch_on_access)
70 {
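    // Temporary block used to complete a fill when no replaceable block
    // is available; see handleFill() and the cleanup in handleResponse().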
71 tempBlock = new BlkType();
72 tempBlock->data = new uint8_t[blkSize];
73
74 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
75 "CpuSidePort");
76 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
77 "MemSidePort");
78 cpuSidePort->setOtherPort(memSidePort);
79 memSidePort->setOtherPort(cpuSidePort);
80
81 tags->setCache(this);
82 if (prefetcher)
83 prefetcher->setCache(this);
84 }
85
86 template<class TagStore>
87 void
88 Cache<TagStore>::regStats()
89 {
90 BaseCache::regStats();
91 tags->regStats(name());
92 if (prefetcher)
93 prefetcher->regStats(name());
94 }
95
96 template<class TagStore>
97 Port *
98 Cache<TagStore>::getPort(const std::string &if_name, int idx)
99 {
100 if (if_name == "" || if_name == "cpu_side") {
101 return cpuSidePort;
102 } else if (if_name == "mem_side") {
103 return memSidePort;
104 } else if (if_name == "functional") {
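        // Functional ports are created on demand and are torn down
        // again through deletePortRefs() below.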
105 CpuSidePort *funcPort =
106 new CpuSidePort(name() + "-cpu_side_funcport", this,
107 "CpuSideFuncPort");
108 funcPort->setOtherPort(memSidePort);
109 return funcPort;
110 } else {
111 panic("Port name %s unrecognized\n", if_name);
112 }
113 }
114
115 template<class TagStore>
116 void
117 Cache<TagStore>::deletePortRefs(Port *p)
118 {
119 if (cpuSidePort == p || memSidePort == p)
120 panic("Can only delete functional ports\n");
121
122 delete p;
123 }
124
125
126 template<class TagStore>
127 void
128 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
129 {
130 uint64_t overwrite_val;
131 bool overwrite_mem;
132 uint64_t condition_val64;
133 uint32_t condition_val32;
134
135 int offset = tags->extractBlkOffset(pkt->getAddr());
136 uint8_t *blk_data = blk->data + offset;
137
138 assert(sizeof(uint64_t) >= pkt->getSize());
139
140 overwrite_mem = true;
141 // keep a copy of our possible write value, and copy what is at the
142 // memory address into the packet
143 pkt->writeData((uint8_t *)&overwrite_val);
144 pkt->setData(blk_data);
145
146 if (pkt->req->isCondSwap()) {
147 if (pkt->getSize() == sizeof(uint64_t)) {
148 condition_val64 = pkt->req->getExtraData();
149 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
150 sizeof(uint64_t));
151 } else if (pkt->getSize() == sizeof(uint32_t)) {
152 condition_val32 = (uint32_t)pkt->req->getExtraData();
153 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
154 sizeof(uint32_t));
155 } else
156 panic("Invalid size for conditional read/write\n");
157 }
158
159 if (overwrite_mem) {
160 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
161 blk->status |= BlkDirty;
162 }
163 }
164
165
166 template<class TagStore>
167 void
168 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
169 {
170 assert(blk);
171 // Occasionally this is not true... if we are a lower-level cache
172 // satisfying a string of Read and ReadEx requests from
173 // upper-level caches, a Read will mark the block as shared but we
174 // can satisfy a following ReadEx anyway since we can rely on the
175 // Read requester(s) to have buffered the ReadEx snoop and to
176 // invalidate their blocks after receiving them.
177 // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
178 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
179
180 // Check RMW operations first since both isRead() and
181 // isWrite() will be true for them
182 if (pkt->cmd == MemCmd::SwapReq) {
183 cmpAndSwap(blk, pkt);
184 } else if (pkt->isWrite()) {
185 if (blk->checkWrite(pkt)) {
186 pkt->writeDataToBlock(blk->data, blkSize);
187 blk->status |= BlkDirty;
188 }
189 } else if (pkt->isRead()) {
190 if (pkt->isLLSC()) {
191 blk->trackLoadLocked(pkt);
192 }
193 pkt->setDataFromBlock(blk->data, blkSize);
194 if (pkt->getSize() == blkSize) {
195 // special handling for coherent block requests from
196 // upper-level caches
197 if (pkt->needsExclusive()) {
198 // on ReadExReq we give up our copy
199 tags->invalidateBlk(blk);
200 } else {
201 // on ReadReq we create shareable copies here and in
202 // the requester
203 pkt->assertShared();
204 blk->status &= ~BlkWritable;
205 }
206 }
207 } else {
208         // Not a read or write... must be an upgrade. It's OK
209 // to just ack those as long as we have an exclusive
210 // copy at this level.
211 assert(pkt->isUpgrade());
212 tags->invalidateBlk(blk);
213 }
214 }
215
216
217 /////////////////////////////////////////////////////
218 //
219 // MSHR helper functions
220 //
221 /////////////////////////////////////////////////////
222
223
224 template<class TagStore>
225 void
226 Cache<TagStore>::markInService(MSHR *mshr)
227 {
228 markInServiceInternal(mshr);
229 #if 0
230 if (mshr->originalCmd == MemCmd::HardPFReq) {
231 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
232 name());
233 //Also clear pending if need be
234 if (!prefetcher->havePending())
235 {
236 deassertMemSideBusRequest(Request_PF);
237 }
238 }
239 #endif
240 }
241
242
243 template<class TagStore>
244 void
245 Cache<TagStore>::squash(int threadNum)
246 {
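    // Throw away all in-flight misses belonging to this thread and, if
    // one of them was responsible for blocking the cache, unblock it.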
247 bool unblock = false;
248 BlockedCause cause = NUM_BLOCKED_CAUSES;
249
250 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
251 noTargetMSHR = NULL;
252 unblock = true;
253 cause = Blocked_NoTargets;
254 }
255 if (mshrQueue.isFull()) {
256 unblock = true;
257 cause = Blocked_NoMSHRs;
258 }
259 mshrQueue.squash(threadNum);
260 if (unblock && !mshrQueue.isFull()) {
261 clearBlocked(cause);
262 }
263 }
264
265 /////////////////////////////////////////////////////
266 //
267 // Access path: requests coming in from the CPU side
268 //
269 /////////////////////////////////////////////////////
270
271 template<class TagStore>
272 bool
273 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
274 int &lat, PacketList &writebacks)
275 {
276 if (pkt->req->isUncacheable()) {
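        // Uncacheable accesses are never cached here: clear any matching
        // block (or the lock flags for a Clrex) and return false so the
        // request is forwarded to the next level.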
277 if (pkt->req->isClrex()) {
278 tags->clearLocks();
279 } else {
280 blk = tags->findBlock(pkt->getAddr());
281 if (blk != NULL) {
282 tags->invalidateBlk(blk);
283 }
284 }
285
286 blk = NULL;
287 lat = hitLatency;
288 return false;
289 }
290
291 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
292 blk = tags->accessBlock(pkt->getAddr(), lat, id);
293
294 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
295 pkt->req->isInstFetch() ? " (ifetch)" : "",
296 pkt->getAddr(), (blk) ? "hit" : "miss");
297
298 if (blk != NULL) {
299
300 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
301 // OK to satisfy access
302 incHitCount(pkt, id);
303 satisfyCpuSideRequest(pkt, blk);
304 return true;
305 }
306 }
307
308 // Can't satisfy access normally... either no block (blk == NULL)
309 // or have block but need exclusive & only have shared.
310
311 // Writeback handling is special case. We can write the block
312 // into the cache without having a writeable copy (or any copy at
313 // all).
314 if (pkt->cmd == MemCmd::Writeback) {
315 assert(blkSize == pkt->getSize());
316 if (blk == NULL) {
317 // need to do a replacement
318 blk = allocateBlock(pkt->getAddr(), writebacks);
319 if (blk == NULL) {
320 // no replaceable block available, give up.
321 // writeback will be forwarded to next level.
322 incMissCount(pkt, id);
323 return false;
324 }
325 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
326 tags->insertBlock(pkt->getAddr(), blk, id);
327 blk->status = BlkValid | BlkReadable;
328 }
329 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
330 blk->status |= BlkDirty;
331 // nothing else to do; writeback doesn't expect response
332 assert(!pkt->needsResponse());
333 incHitCount(pkt, id);
334 return true;
335 }
336
337 incMissCount(pkt, id);
338
339 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
340 // complete miss on store conditional... just give up now
341 pkt->req->setExtraData(0);
342 return true;
343 }
344
345 return false;
346 }
347
348
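// Sender-state record attached to snoops forwarded up the hierarchy in
// timing mode; it saves the original sender state and source so that the
// eventual response can be routed back (see timingAccess() and handleSnoop()).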
349 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
350 {
351 Packet::SenderState *prevSenderState;
352 int prevSrc;
353 #ifndef NDEBUG
354 BaseCache *cache;
355 #endif
356 public:
357 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
358 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
359 #ifndef NDEBUG
360 , cache(_cache)
361 #endif
362 {}
363 void restore(Packet *pkt, BaseCache *_cache)
364 {
365 assert(_cache == cache);
366 pkt->senderState = prevSenderState;
367 pkt->setDest(prevSrc);
368 }
369 };
370
371
372 template<class TagStore>
373 bool
374 Cache<TagStore>::timingAccess(PacketPtr pkt)
375 {
376 //@todo Add back in MemDebug Calls
377 // MemDebug::cacheAccess(pkt);
378
379 // we charge hitLatency for doing just about anything here
380 Tick time = curTick + hitLatency;
381
382 if (pkt->isResponse()) {
383 // must be cache-to-cache response from upper to lower level
384 ForwardResponseRecord *rec =
385 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
386 assert(rec != NULL);
387 rec->restore(pkt, this);
388 delete rec;
389 memSidePort->respond(pkt, time);
390 return true;
391 }
392
393 assert(pkt->isRequest());
394
395 if (pkt->memInhibitAsserted()) {
396 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
397 pkt->getAddr());
398 assert(!pkt->req->isUncacheable());
399 // Special tweak for multilevel coherence: snoop downward here
400 // on invalidates since there may be other caches below here
401 // that have shared copies. Not necessary if we know that
402 // supplier had exclusive copy to begin with.
403 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
404 Packet *snoopPkt = new Packet(pkt, true); // clear flags
405 snoopPkt->setExpressSnoop();
406 snoopPkt->assertMemInhibit();
407 memSidePort->sendTiming(snoopPkt);
408 // main memory will delete snoopPkt
409 }
410 // since we're the official target but we aren't responding,
411 // delete the packet now.
412 delete pkt;
413 return true;
414 }
415
416 if (pkt->req->isUncacheable()) {
417 if (pkt->req->isClrex()) {
418 tags->clearLocks();
419 } else {
420 BlkType *blk = tags->findBlock(pkt->getAddr());
421 if (blk != NULL) {
422 tags->invalidateBlk(blk);
423 }
424 }
425
426 // writes go in write buffer, reads use MSHR
427 if (pkt->isWrite() && !pkt->isRead()) {
428 allocateWriteBuffer(pkt, time, true);
429 } else {
430 allocateUncachedReadBuffer(pkt, time, true);
431 }
432 assert(pkt->needsResponse()); // else we should delete it here??
433 return true;
434 }
435
436 int lat = hitLatency;
437 BlkType *blk = NULL;
438 PacketList writebacks;
439
440 bool satisfied = access(pkt, blk, lat, writebacks);
441
442 #if 0
443 /** @todo make the fast write alloc (wh64) work with coherence. */
444
445 // If this is a block size write/hint (WH64) allocate the block here
446 // if the coherence protocol allows it.
447 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
448 (pkt->cmd == MemCmd::WriteReq
449 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
450 // not outstanding misses, can do this
451 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
452 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
453 if (outstanding_miss) {
454 warn("WriteInv doing a fastallocate"
455 "with an outstanding miss to the same address\n");
456 }
457 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
458 writebacks);
459 ++fastWrites;
460 }
461 }
462 #endif
463
464 // track time of availability of next prefetch, if any
465 Tick next_pf_time = 0;
466
467 bool needsResponse = pkt->needsResponse();
468
469 if (satisfied) {
470 if (needsResponse) {
471 pkt->makeTimingResponse();
472 cpuSidePort->respond(pkt, curTick+lat);
473 } else {
474 delete pkt;
475 }
476
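        // Notify the prefetcher on satisfied accesses as well, either on
        // every access or only when the block was itself brought in by a
        // prefetch; the prefetched flag is cleared once the block is used.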
477 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
478 if (blk)
479 blk->status &= ~BlkHWPrefetched;
480 next_pf_time = prefetcher->notify(pkt, time);
481 }
482 } else {
483 // miss
484
485 Addr blk_addr = blockAlign(pkt->getAddr());
486 MSHR *mshr = mshrQueue.findMatch(blk_addr);
487
488 if (mshr) {
489 // MSHR hit
490 //@todo remove hw_pf here
491 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
492 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
493 mshr->threadNum = -1;
494 }
495 mshr->allocateTarget(pkt, time, order++);
496 if (mshr->getNumTargets() == numTarget) {
497 noTargetMSHR = mshr;
498 setBlocked(Blocked_NoTargets);
499 // need to be careful with this... if this mshr isn't
500                 // ready yet (i.e. time > curTick), we don't want to
501 // move it ahead of mshrs that are ready
502 // mshrQueue.moveToFront(mshr);
503 }
504 } else {
505 // no MSHR
506 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
507 // always mark as cache fill for now... if we implement
508 // no-write-allocate or bypass accesses this will have to
509 // be changed.
510 if (pkt->cmd == MemCmd::Writeback) {
511 allocateWriteBuffer(pkt, time, true);
512 } else {
513 if (blk && blk->isValid()) {
514 // If we have a write miss to a valid block, we
515 // need to mark the block non-readable. Otherwise
516 // if we allow reads while there's an outstanding
517 // write miss, the read could return stale data
518 // out of the cache block... a more aggressive
519 // system could detect the overlap (if any) and
520 // forward data out of the MSHRs, but we don't do
521 // that yet. Note that we do need to leave the
522 // block valid so that it stays in the cache, in
523 // case we get an upgrade response (and hence no
524 // new data) when the write miss completes.
525 // As long as CPUs do proper store/load forwarding
526 // internally, and have a sufficiently weak memory
527 // model, this is probably unnecessary, but at some
528 // point it must have seemed like we needed it...
529 assert(pkt->needsExclusive() && !blk->isWritable());
530 blk->status &= ~BlkReadable;
531 }
532
533 allocateMissBuffer(pkt, time, true);
534 }
535
536 if (prefetcher) {
537 next_pf_time = prefetcher->notify(pkt, time);
538 }
539 }
540 }
541
542 if (next_pf_time != 0)
543 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
544
545 // copy writebacks to write buffer
546 while (!writebacks.empty()) {
547 PacketPtr wbPkt = writebacks.front();
548 allocateWriteBuffer(wbPkt, time, true);
549 writebacks.pop_front();
550 }
551
552 return true;
553 }
554
555
556 // See comment in cache.hh.
557 template<class TagStore>
558 PacketPtr
559 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
560 bool needsExclusive)
561 {
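    // A NULL return value means the original packet should simply be
    // forwarded to the next level rather than being turned into a new
    // cache-block fill request here.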
562 bool blkValid = blk && blk->isValid();
563
564 if (cpu_pkt->req->isUncacheable()) {
565 //assert(blk == NULL);
566 return NULL;
567 }
568
569 if (!blkValid &&
570 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
571 // Writebacks that weren't allocated in access() and upgrades
572 // from upper-level caches that missed completely just go
573 // through.
574 return NULL;
575 }
576
577 assert(cpu_pkt->needsResponse());
578
579 MemCmd cmd;
580 // @TODO make useUpgrades a parameter.
581 // Note that ownership protocols require upgrade, otherwise a
582 // write miss on a shared owned block will generate a ReadExcl,
583 // which will clobber the owned copy.
584 const bool useUpgrades = true;
585 if (blkValid && useUpgrades) {
586 // only reason to be here is that blk is shared
587 // (read-only) and we need exclusive
588 assert(needsExclusive && !blk->isWritable());
589 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
590 } else {
591 // block is invalid
592 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
593 }
594 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
595
596 pkt->allocate();
597 return pkt;
598 }
599
600
601 template<class TagStore>
602 Tick
603 Cache<TagStore>::atomicAccess(PacketPtr pkt)
604 {
605 int lat = hitLatency;
606
607 // @TODO: make this a parameter
608 bool last_level_cache = false;
609
610 if (pkt->memInhibitAsserted()) {
611 assert(!pkt->req->isUncacheable());
612 // have to invalidate ourselves and any lower caches even if
613 // upper cache will be responding
614 if (pkt->isInvalidate()) {
615 BlkType *blk = tags->findBlock(pkt->getAddr());
616 if (blk && blk->isValid()) {
617 tags->invalidateBlk(blk);
618 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
619 pkt->cmdString(), pkt->getAddr());
620 }
621 if (!last_level_cache) {
622 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
623 pkt->cmdString(), pkt->getAddr());
624 lat += memSidePort->sendAtomic(pkt);
625 }
626 } else {
627 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
628 pkt->cmdString(), pkt->getAddr());
629 }
630
631 return lat;
632 }
633
634 // should assert here that there are no outstanding MSHRs or
635 // writebacks... that would mean that someone used an atomic
636 // access in timing mode
637
638 BlkType *blk = NULL;
639 PacketList writebacks;
640
641 if (!access(pkt, blk, lat, writebacks)) {
642 // MISS
643 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
644
645 bool is_forward = (bus_pkt == NULL);
646
647 if (is_forward) {
648 // just forwarding the same request to the next level
649 // no local cache operation involved
650 bus_pkt = pkt;
651 }
652
653 DPRINTF(Cache, "Sending an atomic %s for %x\n",
654 bus_pkt->cmdString(), bus_pkt->getAddr());
655
656 #if TRACING_ON
657 CacheBlk::State old_state = blk ? blk->status : 0;
658 #endif
659
660 lat += memSidePort->sendAtomic(bus_pkt);
661
662 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
663 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
664
665 assert(!bus_pkt->wasNacked());
666
667 // If packet was a forward, the response (if any) is already
668 // in place in the bus_pkt == pkt structure, so we don't need
669 // to do anything. Otherwise, use the separate bus_pkt to
670 // generate response to pkt and then delete it.
671 if (!is_forward) {
672 if (pkt->needsResponse()) {
673 assert(bus_pkt->isResponse());
674 if (bus_pkt->isError()) {
675 pkt->makeAtomicResponse();
676 pkt->copyError(bus_pkt);
677 } else if (bus_pkt->isRead() ||
678 bus_pkt->cmd == MemCmd::UpgradeResp) {
679 // we're updating cache state to allow us to
680 // satisfy the upstream request from the cache
681 blk = handleFill(bus_pkt, blk, writebacks);
682 satisfyCpuSideRequest(pkt, blk);
683 } else {
684 // we're satisfying the upstream request without
685 // modifying cache state, e.g., a write-through
686 pkt->makeAtomicResponse();
687 }
688 }
689 delete bus_pkt;
690 }
691 }
692
693 // Note that we don't invoke the prefetcher at all in atomic mode.
694 // It's not clear how to do it properly, particularly for
695 // prefetchers that aggressively generate prefetch candidates and
696 // rely on bandwidth contention to throttle them; these will tend
697 // to pollute the cache in atomic mode since there is no bandwidth
698 // contention. If we ever do want to enable prefetching in atomic
699 // mode, though, this is the place to do it... see timingAccess()
700 // for an example (though we'd want to issue the prefetch(es)
701 // immediately rather than calling requestMemSideBus() as we do
702 // there).
703
704 // Handle writebacks if needed
705 while (!writebacks.empty()){
706 PacketPtr wbPkt = writebacks.front();
707 memSidePort->sendAtomic(wbPkt);
708 writebacks.pop_front();
709 delete wbPkt;
710 }
711
712 // We now have the block one way or another (hit or completed miss)
713
714 if (pkt->needsResponse()) {
715 pkt->makeAtomicResponse();
716 }
717
718 return lat;
719 }
720
721
722 template<class TagStore>
723 void
724 Cache<TagStore>::functionalAccess(PacketPtr pkt,
725 CachePort *incomingPort,
726 CachePort *otherSidePort)
727 {
728 Addr blk_addr = blockAlign(pkt->getAddr());
729 BlkType *blk = tags->findBlock(pkt->getAddr());
730
731 pkt->pushLabel(name());
732
733 CacheBlkPrintWrapper cbpw(blk);
734 bool done =
735 (blk && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data))
736 || incomingPort->checkFunctional(pkt)
737 || mshrQueue.checkFunctional(pkt, blk_addr)
738 || writeBuffer.checkFunctional(pkt, blk_addr)
739 || otherSidePort->checkFunctional(pkt);
740
741 // We're leaving the cache, so pop cache->name() label
742 pkt->popLabel();
743
744 if (!done) {
745 otherSidePort->sendFunctional(pkt);
746 }
747 }
748
749
750 /////////////////////////////////////////////////////
751 //
752 // Response handling: responses from the memory side
753 //
754 /////////////////////////////////////////////////////
755
756
757 template<class TagStore>
758 void
759 Cache<TagStore>::handleResponse(PacketPtr pkt)
760 {
761 Tick time = curTick + hitLatency;
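    // The MSHR (or write buffer entry) that generated this request was
    // stashed in senderState when the packet was sent out; see
    // getTimingPacket().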
762 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
763 bool is_error = pkt->isError();
764
765 assert(mshr);
766
767 if (pkt->wasNacked()) {
768 //pkt->reinitFromRequest();
769 warn("NACKs from devices not connected to the same bus "
770 "not implemented\n");
771 return;
772 }
773 if (is_error) {
774 DPRINTF(Cache, "Cache received packet with error for address %x, "
775 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
776 }
777
778 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
779
780 MSHRQueue *mq = mshr->queue;
781 bool wasFull = mq->isFull();
782
783 if (mshr == noTargetMSHR) {
784 // we always clear at least one target
785 clearBlocked(Blocked_NoTargets);
786 noTargetMSHR = NULL;
787 }
788
789 // Initial target is used just for stats
790 MSHR::Target *initial_tgt = mshr->getTarget();
791 BlkType *blk = tags->findBlock(pkt->getAddr());
792 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
793 Tick miss_latency = curTick - initial_tgt->recvTime;
794 PacketList writebacks;
795
796 if (pkt->req->isUncacheable()) {
797 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
798 miss_latency;
799 } else {
800 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
801 miss_latency;
802 }
803
804 bool is_fill = !mshr->isForward &&
805 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
806
807 if (is_fill && !is_error) {
808 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
809 pkt->getAddr());
810
811 // give mshr a chance to do some dirty work
812 mshr->handleFill(pkt, blk);
813
814 blk = handleFill(pkt, blk, writebacks);
815 assert(blk != NULL);
816 }
817
818 // First offset for critical word first calculations
819 int initial_offset = 0;
820
821 if (mshr->hasTargets()) {
822 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
823 }
824
825 while (mshr->hasTargets()) {
826 MSHR::Target *target = mshr->getTarget();
827
828 switch (target->source) {
829 case MSHR::Target::FromCPU:
830 Tick completion_time;
831 if (is_fill) {
832 satisfyCpuSideRequest(target->pkt, blk);
833 // How many bytes past the first request is this one
834 int transfer_offset =
835 target->pkt->getOffset(blkSize) - initial_offset;
836 if (transfer_offset < 0) {
837 transfer_offset += blkSize;
838 }
839
840 // If critical word (no offset) return first word time
841 completion_time = tags->getHitLatency() +
842 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
843
844 assert(!target->pkt->req->isUncacheable());
845 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
846 completion_time - target->recvTime;
847 } else if (target->pkt->cmd == MemCmd::StoreCondReq &&
848 pkt->cmd == MemCmd::UpgradeFailResp) {
849 // failed StoreCond upgrade
850 completion_time = tags->getHitLatency() + pkt->finishTime;
851 target->pkt->req->setExtraData(0);
852 } else {
853 // not a cache fill, just forwarding response
854 completion_time = tags->getHitLatency() + pkt->finishTime;
855 if (pkt->isRead() && !is_error) {
856 target->pkt->setData(pkt->getPtr<uint8_t>());
857 }
858 }
859 target->pkt->makeTimingResponse();
860 // if this packet is an error copy that to the new packet
861 if (is_error)
862 target->pkt->copyError(pkt);
863 if (pkt->isInvalidate()) {
864 // If intermediate cache got ReadRespWithInvalidate,
865 // propagate that. Response should not have
866 // isInvalidate() set otherwise.
867 assert(target->pkt->cmd == MemCmd::ReadResp);
868 assert(pkt->cmd == MemCmd::ReadRespWithInvalidate);
869 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
870 }
871 cpuSidePort->respond(target->pkt, completion_time);
872 break;
873
874 case MSHR::Target::FromPrefetcher:
875 assert(target->pkt->cmd == MemCmd::HardPFReq);
876 if (blk)
877 blk->status |= BlkHWPrefetched;
878 delete target->pkt->req;
879 delete target->pkt;
880 break;
881
882 case MSHR::Target::FromSnoop:
883 // I don't believe that a snoop can be in an error state
884 assert(!is_error);
885 // response to snoop request
886 DPRINTF(Cache, "processing deferred snoop...\n");
887 handleSnoop(target->pkt, blk, true, true,
888 mshr->pendingInvalidate || pkt->isInvalidate());
889 break;
890
891 default:
892 panic("Illegal target->source enum %d\n", target->source);
893 }
894
895 mshr->popTarget();
896 }
897
898 if (pkt->isInvalidate()) {
899 tags->invalidateBlk(blk);
900 }
901
902 if (mshr->promoteDeferredTargets()) {
903 // avoid later read getting stale data while write miss is
904 // outstanding.. see comment in timingAccess()
905 blk->status &= ~BlkReadable;
906 MSHRQueue *mq = mshr->queue;
907 mq->markPending(mshr);
908 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
909 } else {
910 mq->deallocate(mshr);
911 if (wasFull && !mq->isFull()) {
912 clearBlocked((BlockedCause)mq->index);
913 }
914 }
915
916 // copy writebacks to write buffer
917 while (!writebacks.empty()) {
918 PacketPtr wbPkt = writebacks.front();
919 allocateWriteBuffer(wbPkt, time, true);
920 writebacks.pop_front();
921 }
922 // if we used temp block, clear it out
923 if (blk == tempBlock) {
924 if (blk->isDirty()) {
925 allocateWriteBuffer(writebackBlk(blk), time, true);
926 }
927 tags->invalidateBlk(blk);
928 }
929
930 delete pkt;
931 }
932
933
934
935
936 template<class TagStore>
937 PacketPtr
938 Cache<TagStore>::writebackBlk(BlkType *blk)
939 {
940 assert(blk && blk->isValid() && blk->isDirty());
941
942 writebacks[0/*pkt->req->threadId()*/]++;
943
944 Request *writebackReq =
945 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
946 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
947 writeback->allocate();
948 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
949
950 blk->status &= ~BlkDirty;
951 return writeback;
952 }
953
954
955 template<class TagStore>
956 typename Cache<TagStore>::BlkType*
957 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
958 {
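    // Pick a victim frame for the new block; if the victim still has an
    // outstanding upgrade MSHR we give up (return NULL), otherwise a dirty
    // victim is scheduled for writeback via the writebacks list.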
959 BlkType *blk = tags->findVictim(addr, writebacks);
960
961 if (blk->isValid()) {
962 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
963 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
964 if (repl_mshr) {
965 // must be an outstanding upgrade request on block
966 // we're about to replace...
967 assert(!blk->isWritable());
968 assert(repl_mshr->needsExclusive());
969 // too hard to replace block with transient state
970 // allocation failed, block not inserted
971 return NULL;
972 } else {
973 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
974 repl_addr, addr,
975 blk->isDirty() ? "writeback" : "clean");
976
977 if (blk->isDirty()) {
978 // Save writeback packet for handling by caller
979 writebacks.push_back(writebackBlk(blk));
980 }
981 }
982 }
983
984 return blk;
985 }
986
987
988 // Note that the reason we return a list of writebacks rather than
989 // inserting them directly in the write buffer is that this function
990 // is called by both atomic and timing-mode accesses, and in atomic
991 // mode we don't mess with the write buffer (we just perform the
992 // writebacks atomically once the original request is complete).
993 template<class TagStore>
994 typename Cache<TagStore>::BlkType*
995 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
996 PacketList &writebacks)
997 {
998 Addr addr = pkt->getAddr();
999 #if TRACING_ON
1000 CacheBlk::State old_state = blk ? blk->status : 0;
1001 #endif
1002
1003 if (blk == NULL) {
1004 // better have read new data...
1005 assert(pkt->hasData());
1006 // need to do a replacement
1007 blk = allocateBlock(addr, writebacks);
1008 if (blk == NULL) {
1009 // No replaceable block... just use temporary storage to
1010 // complete the current request and then get rid of it
1011 assert(!tempBlock->isValid());
1012 blk = tempBlock;
1013 tempBlock->set = tags->extractSet(addr);
1014 tempBlock->tag = tags->extractTag(addr);
1015 DPRINTF(Cache, "using temp block for %x\n", addr);
1016 } else {
1017 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
1018 tags->insertBlock(pkt->getAddr(), blk, id);
1019 }
1020 } else {
1021 // existing block... probably an upgrade
1022 assert(blk->tag == tags->extractTag(addr));
1023 // either we're getting new data or the block should already be valid
1024 assert(pkt->hasData() || blk->isValid());
1025 }
1026
1027 blk->status = BlkValid | BlkReadable;
1028
1029 if (!pkt->sharedAsserted()) {
1030 blk->status |= BlkWritable;
1031 // If we got this via cache-to-cache transfer (i.e., from a
1032 // cache that was an owner) and took away that owner's copy,
1033 // then we need to write it back. Normally this happens
1034 // anyway as a side effect of getting a copy to write it, but
1035 // there are cases (such as failed store conditionals or
1036 // compare-and-swaps) where we'll demand an exclusive copy but
1037 // end up not writing it.
1038 if (pkt->memInhibitAsserted())
1039 blk->status |= BlkDirty;
1040 }
1041
1042 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1043 addr, old_state, blk->status);
1044
1045 // if we got new data, copy it in
1046 if (pkt->isRead()) {
1047 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1048 }
1049
1050 blk->whenReady = pkt->finishTime;
1051
1052 return blk;
1053 }
1054
1055
1056 /////////////////////////////////////////////////////
1057 //
1058 // Snoop path: requests coming in from the memory side
1059 //
1060 /////////////////////////////////////////////////////
1061
1062 template<class TagStore>
1063 void
1064 Cache<TagStore>::
1065 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1066 bool already_copied, bool pending_inval)
1067 {
1068 // timing-mode snoop responses require a new packet, unless we
1069 // already made a copy...
1070 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1071 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1072 pkt->allocate();
1073 pkt->makeTimingResponse();
1074 if (pkt->isRead()) {
1075 pkt->setDataFromBlock(blk_data, blkSize);
1076 }
1077 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1078 // Assume we defer a response to a read from a far-away cache
1079 // A, then later defer a ReadExcl from a cache B on the same
1080 // bus as us. We'll assert MemInhibit in both cases, but in
1081 // the latter case MemInhibit will keep the invalidation from
1082 // reaching cache A. This special response tells cache A that
1083 // it gets the block to satisfy its read, but must immediately
1084 // invalidate it.
1085 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1086 }
1087 memSidePort->respond(pkt, curTick + hitLatency);
1088 }
1089
1090 template<class TagStore>
1091 void
1092 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1093 bool is_timing, bool is_deferred,
1094 bool pending_inval)
1095 {
1096 // deferred snoops can only happen in timing mode
1097 assert(!(is_deferred && !is_timing));
1098 // pending_inval only makes sense on deferred snoops
1099 assert(!(pending_inval && !is_deferred));
1100 assert(pkt->isRequest());
1101
1102 // the packet may get modified if we or a forwarded snooper
1103 // responds in atomic mode, so remember a few things about the
1104 // original packet up front
1105 bool invalidate = pkt->isInvalidate();
1106 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1107
1108 if (forwardSnoops) {
1109 // first propagate snoop upward to see if anyone above us wants to
1110 // handle it. save & restore packet src since it will get
1111 // rewritten to be relative to cpu-side bus (if any)
1112 bool alreadyResponded = pkt->memInhibitAsserted();
1113 if (is_timing) {
1114 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1115 snoopPkt->setExpressSnoop();
1116 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1117 cpuSidePort->sendTiming(snoopPkt);
1118 if (snoopPkt->memInhibitAsserted()) {
1119 // cache-to-cache response from some upper cache
1120 assert(!alreadyResponded);
1121 pkt->assertMemInhibit();
1122 } else {
1123 delete snoopPkt->senderState;
1124 }
1125 if (snoopPkt->sharedAsserted()) {
1126 pkt->assertShared();
1127 }
1128 delete snoopPkt;
1129 } else {
1130 int origSrc = pkt->getSrc();
1131 cpuSidePort->sendAtomic(pkt);
1132 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1133 // cache-to-cache response from some upper cache:
1134 // forward response to original requester
1135 assert(pkt->isResponse());
1136 }
1137 pkt->setSrc(origSrc);
1138 }
1139 }
1140
1141 if (!blk || !blk->isValid()) {
1142 return;
1143 }
1144
1145 // we may end up modifying both the block state and the packet (if
1146 // we respond in atomic mode), so just figure out what to do now
1147 // and then do it later
1148 bool respond = blk->isDirty() && pkt->needsResponse();
1149 bool have_exclusive = blk->isWritable();
1150
1151 if (pkt->isRead() && !invalidate) {
1152 assert(!needs_exclusive);
1153 pkt->assertShared();
1154 int bits_to_clear = BlkWritable;
1155 const bool haveOwnershipState = true; // for now
1156 if (!haveOwnershipState) {
1157 // if we don't support pure ownership (dirty && !writable),
1158 // have to clear dirty bit here, assume memory snarfs data
1159 // on cache-to-cache xfer
1160 bits_to_clear |= BlkDirty;
1161 }
1162 blk->status &= ~bits_to_clear;
1163 }
1164
1165 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1166 pkt->cmdString(), blockAlign(pkt->getAddr()),
1167 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1168
1169 if (respond) {
1170 assert(!pkt->memInhibitAsserted());
1171 pkt->assertMemInhibit();
1172 if (have_exclusive) {
1173 pkt->setSupplyExclusive();
1174 }
1175 if (is_timing) {
1176 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1177 } else {
1178 pkt->makeAtomicResponse();
1179 pkt->setDataFromBlock(blk->data, blkSize);
1180 }
1181 } else if (is_timing && is_deferred) {
1182 // if it's a deferred timing snoop then we've made a copy of
1183 // the packet, and so if we're not using that copy to respond
1184 // then we need to delete it here.
1185 delete pkt;
1186 }
1187
1188 // Do this last in case it deallocates block data or something
1189 // like that
1190 if (invalidate) {
1191 tags->invalidateBlk(blk);
1192 }
1193 }
1194
1195
1196 template<class TagStore>
1197 void
1198 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1199 {
1200 // Note that some deferred snoops don't have requests, since the
1201 // original access may have already completed
1202 if ((pkt->req && pkt->req->isUncacheable()) ||
1203 pkt->cmd == MemCmd::Writeback) {
1204         // Can't get a hit on an uncacheable address
1205         // Revisit this for multi-level coherence
1206 return;
1207 }
1208
1209 BlkType *blk = tags->findBlock(pkt->getAddr());
1210
1211 Addr blk_addr = blockAlign(pkt->getAddr());
1212 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1213
1214 // Let the MSHR itself track the snoop and decide whether we want
1215 // to go ahead and do the regular cache snoop
1216 if (mshr && mshr->handleSnoop(pkt, order++)) {
1217 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1218 blk_addr);
1219 if (mshr->getNumTargets() > numTarget)
1220 warn("allocating bonus target for snoop"); //handle later
1221 return;
1222 }
1223
1224     // We also need to check the writeback buffers and handle those
1225 std::vector<MSHR *> writebacks;
1226 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1227 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1228 pkt->getAddr());
1229
1230         // Look through the writebacks for any cacheable writes and use the first one
1231 for (int i = 0; i < writebacks.size(); i++) {
1232 mshr = writebacks[i];
1233 assert(!mshr->isUncacheable());
1234 assert(mshr->getNumTargets() == 1);
1235 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1236 assert(wb_pkt->cmd == MemCmd::Writeback);
1237
1238 assert(!pkt->memInhibitAsserted());
1239 pkt->assertMemInhibit();
1240 if (!pkt->needsExclusive()) {
1241 pkt->assertShared();
1242 } else {
1243 // if we're not asserting the shared line, we need to
1244 // invalidate our copy. we'll do that below as long as
1245 // the packet's invalidate flag is set...
1246 assert(pkt->isInvalidate());
1247 }
1248 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1249 false, false);
1250
1251 if (pkt->isInvalidate()) {
1252 // Invalidation trumps our writeback... discard here
1253 markInService(mshr);
1254 delete wb_pkt;
1255 }
1256
1257 // If this was a shared writeback, there may still be
1258 // other shared copies above that require invalidation.
1259 // We could be more selective and return here if the
1260 // request is non-exclusive or if the writeback is
1261 // exclusive.
1262 break;
1263 }
1264 }
1265
1266 handleSnoop(pkt, blk, true, false, false);
1267 }
1268
1269
1270 template<class TagStore>
1271 Tick
1272 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1273 {
1274 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1275 // Can't get a hit on an uncacheable address
1276         // Revisit this for multi-level coherence
1277 return hitLatency;
1278 }
1279
1280 BlkType *blk = tags->findBlock(pkt->getAddr());
1281 handleSnoop(pkt, blk, false, false, false);
1282 return hitLatency;
1283 }
1284
1285
1286 template<class TagStore>
1287 MSHR *
1288 Cache<TagStore>::getNextMSHR()
1289 {
1290 // Check both MSHR queue and write buffer for potential requests
1291 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1292 MSHR *write_mshr = writeBuffer.getNextMSHR();
1293
1294 // Now figure out which one to send... some cases are easy
1295 if (miss_mshr && !write_mshr) {
1296 return miss_mshr;
1297 }
1298 if (write_mshr && !miss_mshr) {
1299 return write_mshr;
1300 }
1301
1302 if (miss_mshr && write_mshr) {
1303 // We have one of each... normally we favor the miss request
1304 // unless the write buffer is full
1305 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1306 // Write buffer is full, so we'd like to issue a write;
1307 // need to search MSHR queue for conflicting earlier miss.
1308 MSHR *conflict_mshr =
1309 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1310
1311 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1312 // Service misses in order until conflict is cleared.
1313 return conflict_mshr;
1314 }
1315
1316 // No conflicts; issue write
1317 return write_mshr;
1318 }
1319
1320 // Write buffer isn't full, but need to check it for
1321 // conflicting earlier writeback
1322 MSHR *conflict_mshr =
1323 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1324 if (conflict_mshr) {
1325 // not sure why we don't check order here... it was in the
1326 // original code but commented out.
1327
1328 // The only way this happens is if we are
1329 // doing a write and we didn't have permissions
1330 // then subsequently saw a writeback (owned got evicted)
1331 // We need to make sure to perform the writeback first
1332 // To preserve the dirty data, then we can issue the write
1333
1334 // should we return write_mshr here instead? I.e. do we
1335 // have to flush writes in order? I don't think so... not
1336 // for Alpha anyway. Maybe for x86?
1337 return conflict_mshr;
1338 }
1339
1340 // No conflicts; issue read
1341 return miss_mshr;
1342 }
1343
1344 // fall through... no pending requests. Try a prefetch.
1345 assert(!miss_mshr && !write_mshr);
1346 if (prefetcher && !mshrQueue.isFull()) {
1347 // If we have a miss queue slot, we can try a prefetch
1348 PacketPtr pkt = prefetcher->getPacket();
1349 if (pkt) {
1350 Addr pf_addr = blockAlign(pkt->getAddr());
1351 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
1352 // Update statistic on number of prefetches issued
1353 // (hwpf_mshr_misses)
1354 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1355 // Don't request bus, since we already have it
1356 return allocateMissBuffer(pkt, curTick, false);
1357 }
1358 }
1359 }
1360
1361 return NULL;
1362 }
1363
1364
1365 template<class TagStore>
1366 PacketPtr
1367 Cache<TagStore>::getTimingPacket()
1368 {
1369 MSHR *mshr = getNextMSHR();
1370
1371 if (mshr == NULL) {
1372 return NULL;
1373 }
1374
1375 // use request from 1st target
1376 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1377 PacketPtr pkt = NULL;
1378
1379 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq) {
1380 // SCUpgradeReq saw invalidation while queued in MSHR, so now
1381 // that we are getting around to processing it, just treat it
1382 // as if we got a failure response
1383 pkt = new Packet(tgt_pkt);
1384 pkt->cmd = MemCmd::UpgradeFailResp;
1385 pkt->senderState = mshr;
1386 pkt->firstWordTime = pkt->finishTime = curTick;
1387 handleResponse(pkt);
1388 return NULL;
1389 } else if (mshr->isForwardNoResponse()) {
1390 // no response expected, just forward packet as it is
1391 assert(tags->findBlock(mshr->addr) == NULL);
1392 pkt = tgt_pkt;
1393 } else {
1394 BlkType *blk = tags->findBlock(mshr->addr);
1395 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1396
1397 mshr->isForward = (pkt == NULL);
1398
1399 if (mshr->isForward) {
1400 // not a cache block request, but a response is expected
1401 // make copy of current packet to forward, keep current
1402 // copy for response handling
1403 pkt = new Packet(tgt_pkt);
1404 pkt->allocate();
1405 if (pkt->isWrite()) {
1406 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1407 }
1408 }
1409 }
1410
1411 assert(pkt != NULL);
1412 pkt->senderState = mshr;
1413 return pkt;
1414 }
1415
1416
1417 template<class TagStore>
1418 Tick
1419 Cache<TagStore>::nextMSHRReadyTime()
1420 {
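    // Earliest tick at which the MSHR queue, the write buffer, or the
    // prefetcher will have something ready to send on the memory side.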
1421 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1422 writeBuffer.nextMSHRReadyTime());
1423
1424 if (prefetcher) {
1425 nextReady = std::min(nextReady,
1426 prefetcher->nextPrefetchReadyTime());
1427 }
1428
1429 return nextReady;
1430 }
1431
1432
1433 ///////////////
1434 //
1435 // CpuSidePort
1436 //
1437 ///////////////
1438
1439 template<class TagStore>
1440 void
1441 Cache<TagStore>::CpuSidePort::
1442 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1443 {
1444 // CPU side port doesn't snoop; it's a target only. It can
1445 // potentially respond to any address.
1446 snoop = false;
1447 resp.push_back(myCache()->getAddrRange());
1448 }
1449
1450
1451 template<class TagStore>
1452 bool
1453 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1454 {
1455 // illegal to block responses... can lead to deadlock
1456 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1457 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1458 mustSendRetry = true;
1459 return false;
1460 }
1461
1462 myCache()->timingAccess(pkt);
1463 return true;
1464 }
1465
1466
1467 template<class TagStore>
1468 Tick
1469 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1470 {
1471 return myCache()->atomicAccess(pkt);
1472 }
1473
1474
1475 template<class TagStore>
1476 void
1477 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1478 {
1479 myCache()->functionalAccess(pkt, this, otherPort);
1480 }
1481
1482
1483 template<class TagStore>
1484 Cache<TagStore>::
1485 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1486 const std::string &_label)
1487 : BaseCache::CachePort(_name, _cache, _label)
1488 {
1489 }
1490
1491 ///////////////
1492 //
1493 // MemSidePort
1494 //
1495 ///////////////
1496
1497 template<class TagStore>
1498 void
1499 Cache<TagStore>::MemSidePort::
1500 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
1501 {
1502 // Memory-side port always snoops, but never passes requests
1503 // through to targets on the cpu side (so we don't add anything to
1504 // the address range list).
1505 snoop = true;
1506 }
1507
1508
1509 template<class TagStore>
1510 bool
1511 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1512 {
1513     // This needs to be fixed so that the cache updates the MSHR and resends
1514     // the packet on the link, but that is unlikely to happen any time soon,
1515     // so until it gets fixed, just panic when a nacked packet shows up
1516 if (pkt->wasNacked())
1517 panic("Need to implement cache resending nacked packets!\n");
1518
1519 if (pkt->isRequest() && blocked) {
1520 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1521 mustSendRetry = true;
1522 return false;
1523 }
1524
1525 if (pkt->isResponse()) {
1526 myCache()->handleResponse(pkt);
1527 } else {
1528 myCache()->snoopTiming(pkt);
1529 }
1530 return true;
1531 }
1532
1533
1534 template<class TagStore>
1535 Tick
1536 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1537 {
1538 // in atomic mode, responses go back to the sender via the
1539 // function return from sendAtomic(), not via a separate
1540 // sendAtomic() from the responder. Thus we should never see a
1541 // response packet in recvAtomic() (anywhere, not just here).
1542 assert(!pkt->isResponse());
1543 return myCache()->snoopAtomic(pkt);
1544 }
1545
1546
1547 template<class TagStore>
1548 void
1549 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1550 {
1551 myCache()->functionalAccess(pkt, this, otherPort);
1552 }
1553
1554
1555
1556 template<class TagStore>
1557 void
1558 Cache<TagStore>::MemSidePort::sendPacket()
1559 {
1560 // if we have responses that are ready, they take precedence
1561 if (deferredPacketReady()) {
1562 bool success = sendTiming(transmitList.front().pkt);
1563
1564 if (success) {
1565 //send successful, remove packet
1566 transmitList.pop_front();
1567 }
1568
1569 waitingOnRetry = !success;
1570 } else {
1571 // check for non-response packets (requests & writebacks)
1572 PacketPtr pkt = myCache()->getTimingPacket();
1573 if (pkt == NULL) {
1574 // can happen if e.g. we attempt a writeback and fail, but
1575 // before the retry, the writeback is eliminated because
1576 // we snoop another cache's ReadEx.
1577 waitingOnRetry = false;
1578 } else {
1579 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1580
1581 bool success = sendTiming(pkt);
1582
1583 waitingOnRetry = !success;
1584 if (waitingOnRetry) {
1585 DPRINTF(CachePort, "now waiting on a retry\n");
1586 if (!mshr->isForwardNoResponse()) {
1587 delete pkt;
1588 }
1589 } else {
1590 myCache()->markInService(mshr);
1591 }
1592 }
1593 }
1594
1595
1596 // tried to send packet... if it was successful (no retry), see if
1597 // we need to rerequest bus or not
1598 if (!waitingOnRetry) {
1599 Tick nextReady = std::min(deferredPacketReadyTime(),
1600 myCache()->nextMSHRReadyTime());
1601         // @TODO: need to factor in prefetch requests here somehow
1602 if (nextReady != MaxTick) {
1603 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1604 schedule(sendEvent, std::max(nextReady, curTick + 1));
1605 } else {
1606 // no more to send right now: if we're draining, we may be done
1607 if (drainEvent && !sendEvent->scheduled()) {
1608 drainEvent->process();
1609 drainEvent = NULL;
1610 }
1611 }
1612 }
1613 }
1614
1615 template<class TagStore>
1616 void
1617 Cache<TagStore>::MemSidePort::recvRetry()
1618 {
1619 assert(waitingOnRetry);
1620 sendPacket();
1621 }
1622
1623
1624 template<class TagStore>
1625 void
1626 Cache<TagStore>::MemSidePort::processSendEvent()
1627 {
1628 assert(!waitingOnRetry);
1629 sendPacket();
1630 }
1631
1632
1633 template<class TagStore>
1634 Cache<TagStore>::
1635 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1636 const std::string &_label)
1637 : BaseCache::CachePort(_name, _cache, _label)
1638 {
1639 // override default send event from SimpleTimingPort
1640 delete sendEvent;
1641 sendEvent = new SendEvent(this);
1642 }