mem: Add a master ID to each request object.
gem5.git: src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 */
47
48 /**
49 * @file
50 * Cache definitions.
51 */
52
53 #include "base/fast_alloc.hh"
54 #include "base/misc.hh"
55 #include "base/range.hh"
56 #include "base/types.hh"
57 #include "debug/Cache.hh"
58 #include "debug/CachePort.hh"
59 #include "mem/cache/prefetch/base.hh"
60 #include "mem/cache/blk.hh"
61 #include "mem/cache/cache.hh"
62 #include "mem/cache/mshr.hh"
63 #include "sim/sim_exit.hh"
64
65 template<class TagStore>
66 Cache<TagStore>::Cache(const Params *p, TagStore *tags)
67 : BaseCache(p),
68 tags(tags),
69 prefetcher(p->prefetcher),
70 doFastWrites(true),
71 prefetchOnAccess(p->prefetch_on_access)
72 {
73 tempBlock = new BlkType();
74 tempBlock->data = new uint8_t[blkSize];
75
76 cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
77 "CpuSidePort");
78 memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
79 "MemSidePort");
80
81 tags->setCache(this);
82 if (prefetcher)
83 prefetcher->setCache(this);
84 }
85
86 template<class TagStore>
87 void
88 Cache<TagStore>::regStats()
89 {
90 BaseCache::regStats();
91 tags->regStats(name());
92 }
93
94 template<class TagStore>
95 Port *
96 Cache<TagStore>::getPort(const std::string &if_name, int idx)
97 {
98 if (if_name == "" || if_name == "cpu_side") {
99 return cpuSidePort;
100 } else if (if_name == "mem_side") {
101 return memSidePort;
102 } else {
103 panic("Port name %s unrecognized\n", if_name);
104 }
105 }
106
107 template<class TagStore>
108 void
109 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
110 {
111 uint64_t overwrite_val;
112 bool overwrite_mem;
113 uint64_t condition_val64;
114 uint32_t condition_val32;
115
116 int offset = tags->extractBlkOffset(pkt->getAddr());
117 uint8_t *blk_data = blk->data + offset;
118
119 assert(sizeof(uint64_t) >= pkt->getSize());
120
121 overwrite_mem = true;
122 // keep a copy of our possible write value, and copy what is at the
123 // memory address into the packet
124 pkt->writeData((uint8_t *)&overwrite_val);
125 pkt->setData(blk_data);
126
127 if (pkt->req->isCondSwap()) {
128 if (pkt->getSize() == sizeof(uint64_t)) {
129 condition_val64 = pkt->req->getExtraData();
130 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
131 sizeof(uint64_t));
132 } else if (pkt->getSize() == sizeof(uint32_t)) {
133 condition_val32 = (uint32_t)pkt->req->getExtraData();
134 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
135 sizeof(uint32_t));
136 } else
137 panic("Invalid size for conditional read/write\n");
138 }
139
140 if (overwrite_mem) {
141 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
142 blk->status |= BlkDirty;
143 }
144 }
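
/*
 * A minimal standalone sketch (not gem5 code; all names below are
 * hypothetical) of the conditional-swap policy implemented above: the new
 * value is written only if no condition is supplied, or if the current
 * memory contents match the supplied condition value.  The return value
 * corresponds to the overwrite_mem flag.
 */
static inline bool
example_cmp_and_swap(uint8_t *mem, uint64_t new_val, unsigned size,
                     bool has_cond, uint64_t cond_val)
{
    bool overwrite = true;
    if (has_cond) {
        if (size == sizeof(uint64_t)) {
            overwrite = std::memcmp(&cond_val, mem, sizeof(uint64_t)) == 0;
        } else if (size == sizeof(uint32_t)) {
            uint32_t cond32 = (uint32_t)cond_val;
            overwrite = std::memcmp(&cond32, mem, sizeof(uint32_t)) == 0;
        } else {
            return false; // unsupported size; the code above panics instead
        }
    }
    if (overwrite)
        std::memcpy(mem, &new_val, size); // byte-wise copy, as above
    return overwrite;
}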
145
146
147 template<class TagStore>
148 void
149 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
150 bool deferred_response,
151 bool pending_downgrade)
152 {
153 assert(blk && blk->isValid());
154 // Occasionally this is not true... if we are a lower-level cache
155 // satisfying a string of Read and ReadEx requests from
156 // upper-level caches, a Read will mark the block as shared but we
157 // can satisfy a following ReadEx anyway since we can rely on the
158 // Read requester(s) to have buffered the ReadEx snoop and to
159 // invalidate their blocks after receiving them.
160 // assert(!pkt->needsExclusive() || blk->isWritable());
161 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
162
163 // Check RMW operations first since both isRead() and
164 // isWrite() will be true for them
165 if (pkt->cmd == MemCmd::SwapReq) {
166 cmpAndSwap(blk, pkt);
167 } else if (pkt->isWrite()) {
168 if (blk->checkWrite(pkt)) {
169 pkt->writeDataToBlock(blk->data, blkSize);
170 blk->status |= BlkDirty;
171 }
172 } else if (pkt->isRead()) {
173 if (pkt->isLLSC()) {
174 blk->trackLoadLocked(pkt);
175 }
176 pkt->setDataFromBlock(blk->data, blkSize);
177 if (pkt->getSize() == blkSize) {
178 // special handling for coherent block requests from
179 // upper-level caches
180 if (pkt->needsExclusive()) {
181 // if we have a dirty copy, make sure the recipient
182 // keeps it marked dirty
183 if (blk->isDirty()) {
184 pkt->assertMemInhibit();
185 }
186 // on ReadExReq we give up our copy unconditionally
187 tags->invalidateBlk(blk);
188 } else if (blk->isWritable() && !pending_downgrade
189 && !pkt->sharedAsserted()) {
190 // we can give the requester an exclusive copy (by not
191 // asserting shared line) on a read request if:
192 // - we have an exclusive copy at this level (& below)
193 // - we don't have a pending snoop from below
194 // signaling another read request
195 // - no other cache above has a copy (otherwise it
196 // would have asserted shared line on request)
197
198 if (blk->isDirty()) {
199 // special considerations if we're owner:
200 if (!deferred_response && !isTopLevel) {
201 // if we are responding immediately and can
202 // signal that we're transferring ownership
203 // along with exclusivity, do so
204 pkt->assertMemInhibit();
205 blk->status &= ~BlkDirty;
206 } else {
207 // if we're responding after our own miss,
208 // there's a window where the recipient didn't
209 // know it was getting ownership and may not
210 // have responded to snoops correctly, so we
211 // can't pass off ownership *or* exclusivity
212 pkt->assertShared();
213 }
214 }
215 } else {
216 // otherwise only respond with a shared copy
217 pkt->assertShared();
218 }
219 }
220 } else {
221 // Not a read or write... must be an upgrade. It's OK
222 // to just ack those as long as we have an exclusive
223 // copy at this level.
224 assert(pkt->isUpgrade());
225 tags->invalidateBlk(blk);
226 }
227 }
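
/*
 * A hypothetical condensation (not gem5 code) of the full-block read
 * policy described in the comments above: what kind of copy the requester
 * ends up with, given our block state and the response circumstances.
 * All parameter names are illustrative.
 */
enum ExampleGrant { EX_GRANT_SHARED, EX_GRANT_EXCLUSIVE, EX_GRANT_OWNED };

static inline ExampleGrant
example_read_grant(bool writable, bool dirty, bool pending_downgrade,
                   bool shared_asserted, bool deferred_response,
                   bool is_top_level)
{
    if (!writable || pending_downgrade || shared_asserted)
        return EX_GRANT_SHARED;            // can only pass a shared copy
    if (!dirty)
        return EX_GRANT_EXCLUSIVE;         // clean exclusive copy
    if (!deferred_response && !is_top_level)
        return EX_GRANT_OWNED;             // pass ownership with the data
    return EX_GRANT_SHARED;                // can't safely pass ownership
}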
228
229
230 /////////////////////////////////////////////////////
231 //
232 // MSHR helper functions
233 //
234 /////////////////////////////////////////////////////
235
236
237 template<class TagStore>
238 void
239 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
240 {
241 markInServiceInternal(mshr, pkt);
242 #if 0
243 if (mshr->originalCmd == MemCmd::HardPFReq) {
244 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
245 name());
246 //Also clear pending if need be
247 if (!prefetcher->havePending())
248 {
249 deassertMemSideBusRequest(Request_PF);
250 }
251 }
252 #endif
253 }
254
255
256 template<class TagStore>
257 void
258 Cache<TagStore>::squash(int threadNum)
259 {
260 bool unblock = false;
261 BlockedCause cause = NUM_BLOCKED_CAUSES;
262
263 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
264 noTargetMSHR = NULL;
265 unblock = true;
266 cause = Blocked_NoTargets;
267 }
268 if (mshrQueue.isFull()) {
269 unblock = true;
270 cause = Blocked_NoMSHRs;
271 }
272 mshrQueue.squash(threadNum);
273 if (unblock && !mshrQueue.isFull()) {
274 clearBlocked(cause);
275 }
276 }
277
278 /////////////////////////////////////////////////////
279 //
280 // Access path: requests coming in from the CPU side
281 //
282 /////////////////////////////////////////////////////
283
284 template<class TagStore>
285 bool
286 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
287 int &lat, PacketList &writebacks)
288 {
289 if (pkt->req->isUncacheable()) {
290 if (pkt->req->isClearLL()) {
291 tags->clearLocks();
292 } else {
293 blk = tags->findBlock(pkt->getAddr());
294 if (blk != NULL) {
295 tags->invalidateBlk(blk);
296 }
297 }
298
299 blk = NULL;
300 lat = hitLatency;
301 return false;
302 }
303
304 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
305 blk = tags->accessBlock(pkt->getAddr(), lat, id);
306
307 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
308 pkt->req->isInstFetch() ? " (ifetch)" : "",
309 pkt->getAddr(), (blk) ? "hit" : "miss");
310
311 if (blk != NULL) {
312
313 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
314 // OK to satisfy access
315 incHitCount(pkt, id);
316 satisfyCpuSideRequest(pkt, blk);
317 return true;
318 }
319 }
320
321 // Can't satisfy access normally... either no block (blk == NULL)
322 // or have block but need exclusive & only have shared.
323
324 // Writeback handling is a special case. We can write the block
325 // into the cache without having a writeable copy (or any copy at
326 // all).
327 if (pkt->cmd == MemCmd::Writeback) {
328 assert(blkSize == pkt->getSize());
329 if (blk == NULL) {
330 // need to do a replacement
331 blk = allocateBlock(pkt->getAddr(), writebacks);
332 if (blk == NULL) {
333 // no replaceable block available, give up.
334 // writeback will be forwarded to next level.
335 incMissCount(pkt, id);
336 return false;
337 }
338 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
339 tags->insertBlock(pkt->getAddr(), blk, id);
340 blk->status = BlkValid | BlkReadable;
341 }
342 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
343 blk->status |= BlkDirty;
344 if (pkt->isSupplyExclusive()) {
345 blk->status |= BlkWritable;
346 }
347 // nothing else to do; writeback doesn't expect response
348 assert(!pkt->needsResponse());
349 incHitCount(pkt, id);
350 return true;
351 }
352
353 incMissCount(pkt, id);
354
355 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
356 // complete miss on store conditional... just give up now
357 pkt->req->setExtraData(0);
358 return true;
359 }
360
361 return false;
362 }
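
/*
 * An illustrative helper (hypothetical, not part of the cache) capturing
 * the hit test performed above: a block can satisfy the access only when
 * it grants sufficient permission for the request type.
 */
static inline bool
example_can_satisfy(bool blk_valid, bool blk_readable, bool blk_writable,
                    bool needs_exclusive)
{
    if (!blk_valid)
        return false;                      // no block at all
    return needs_exclusive ? blk_writable : blk_readable;
}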
363
364
365 class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
366 {
367 Packet::SenderState *prevSenderState;
368 int prevSrc;
369 #ifndef NDEBUG
370 BaseCache *cache;
371 #endif
372 public:
373 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
374 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
375 #ifndef NDEBUG
376 , cache(_cache)
377 #endif
378 {}
379 void restore(Packet *pkt, BaseCache *_cache)
380 {
381 assert(_cache == cache);
382 pkt->senderState = prevSenderState;
383 pkt->setDest(prevSrc);
384 }
385 };
386
387
388 template<class TagStore>
389 bool
390 Cache<TagStore>::timingAccess(PacketPtr pkt)
391 {
392 //@todo Add back in MemDebug Calls
393 // MemDebug::cacheAccess(pkt);
394
395 // we charge hitLatency for doing just about anything here
396 Tick time = curTick() + hitLatency;
397
398 if (pkt->isResponse()) {
399 // must be cache-to-cache response from upper to lower level
400 ForwardResponseRecord *rec =
401 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
402
403 if (rec == NULL) {
404 assert(pkt->cmd == MemCmd::HardPFResp);
405 // Check if it's a prefetch response and handle it. We shouldn't
406 // get any other kinds of responses without FRRs.
407 DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
408 pkt->getAddr());
409 handleResponse(pkt);
410 return true;
411 }
412
413 rec->restore(pkt, this);
414 delete rec;
415 memSidePort->respond(pkt, time);
416 return true;
417 }
418
419 assert(pkt->isRequest());
420
421 if (pkt->memInhibitAsserted()) {
422 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
423 pkt->getAddr());
424 assert(!pkt->req->isUncacheable());
425 // Special tweak for multilevel coherence: snoop downward here
426 // on invalidates since there may be other caches below here
427 // that have shared copies. Not necessary if we know that
428 // supplier had exclusive copy to begin with.
429 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
430 Packet *snoopPkt = new Packet(pkt, true); // clear flags
431 snoopPkt->setExpressSnoop();
432 snoopPkt->assertMemInhibit();
433 memSidePort->sendTiming(snoopPkt);
434 // main memory will delete snoopPkt
435 }
436 // since we're the official target but we aren't responding,
437 // delete the packet now.
438 delete pkt;
439 return true;
440 }
441
442 if (pkt->req->isUncacheable()) {
443 if (pkt->req->isClearLL()) {
444 tags->clearLocks();
445 } else {
446 BlkType *blk = tags->findBlock(pkt->getAddr());
447 if (blk != NULL) {
448 tags->invalidateBlk(blk);
449 }
450 }
451
452 // writes go in write buffer, reads use MSHR
453 if (pkt->isWrite() && !pkt->isRead()) {
454 allocateWriteBuffer(pkt, time, true);
455 } else {
456 allocateUncachedReadBuffer(pkt, time, true);
457 }
458 assert(pkt->needsResponse()); // else we should delete it here??
459 return true;
460 }
461
462 int lat = hitLatency;
463 BlkType *blk = NULL;
464 PacketList writebacks;
465
466 bool satisfied = access(pkt, blk, lat, writebacks);
467
468 #if 0
469 /** @todo make the fast write alloc (wh64) work with coherence. */
470
471 // If this is a block size write/hint (WH64) allocate the block here
472 // if the coherence protocol allows it.
473 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
474 (pkt->cmd == MemCmd::WriteReq
475 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
476 // no outstanding misses, can do this
477 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
478 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
479 if (outstanding_miss) {
480 warn("WriteInv doing a fastallocate"
481 "with an outstanding miss to the same address\n");
482 }
483 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
484 writebacks);
485 ++fastWrites;
486 }
487 }
488 #endif
489
490 // track time of availability of next prefetch, if any
491 Tick next_pf_time = 0;
492
493 bool needsResponse = pkt->needsResponse();
494
495 if (satisfied) {
496 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
497 if (blk)
498 blk->status &= ~BlkHWPrefetched;
499 next_pf_time = prefetcher->notify(pkt, time);
500 }
501
502 if (needsResponse) {
503 pkt->makeTimingResponse();
504 cpuSidePort->respond(pkt, curTick()+lat);
505 } else {
506 delete pkt;
507 }
508 } else {
509 // miss
510
511 Addr blk_addr = blockAlign(pkt->getAddr());
512 MSHR *mshr = mshrQueue.findMatch(blk_addr);
513
514 if (mshr) {
515 // MSHR hit
516 //@todo remove hw_pf here
517 mshr_hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
518 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
519 mshr->threadNum = -1;
520 }
521 mshr->allocateTarget(pkt, time, order++);
522 if (mshr->getNumTargets() == numTarget) {
523 noTargetMSHR = mshr;
524 setBlocked(Blocked_NoTargets);
525 // need to be careful with this... if this mshr isn't
526 // ready yet (i.e. time > curTick()), we don't want to
527 // move it ahead of mshrs that are ready
528 // mshrQueue.moveToFront(mshr);
529 }
530 } else {
531 // no MSHR
532 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
533 // always mark as cache fill for now... if we implement
534 // no-write-allocate or bypass accesses this will have to
535 // be changed.
536 if (pkt->cmd == MemCmd::Writeback) {
537 allocateWriteBuffer(pkt, time, true);
538 } else {
539 if (blk && blk->isValid()) {
540 // If we have a write miss to a valid block, we
541 // need to mark the block non-readable. Otherwise
542 // if we allow reads while there's an outstanding
543 // write miss, the read could return stale data
544 // out of the cache block... a more aggressive
545 // system could detect the overlap (if any) and
546 // forward data out of the MSHRs, but we don't do
547 // that yet. Note that we do need to leave the
548 // block valid so that it stays in the cache, in
549 // case we get an upgrade response (and hence no
550 // new data) when the write miss completes.
551 // As long as CPUs do proper store/load forwarding
552 // internally, and have a sufficiently weak memory
553 // model, this is probably unnecessary, but at some
554 // point it must have seemed like we needed it...
555 assert(pkt->needsExclusive() && !blk->isWritable());
556 blk->status &= ~BlkReadable;
557 }
558
559 allocateMissBuffer(pkt, time, true);
560 }
561
562 if (prefetcher) {
563 next_pf_time = prefetcher->notify(pkt, time);
564 }
565 }
566 }
567
568 if (next_pf_time != 0)
569 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
570
571 // copy writebacks to write buffer
572 while (!writebacks.empty()) {
573 PacketPtr wbPkt = writebacks.front();
574 allocateWriteBuffer(wbPkt, time, true);
575 writebacks.pop_front();
576 }
577
578 return true;
579 }
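
/*
 * A hedged sketch of the miss-side coalescing above: a miss either joins
 * an existing MSHR that already tracks the same block or allocates a new
 * one, and the cache blocks for new targets once an MSHR reaches its
 * target limit.  The ExampleMshr type and all names are hypothetical.
 */
struct ExampleMshr { int ntargets; };

static inline bool   // returns true if the cache must block (no targets)
example_handle_miss(ExampleMshr *match, int max_targets,
                    bool &allocate_new_mshr)
{
    if (match) {
        allocate_new_mshr = false;
        ++match->ntargets;                 // coalesce into the existing MSHR
        return match->ntargets == max_targets;
    }
    allocate_new_mshr = true;              // start tracking a new miss
    return false;
}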
580
581
582 // See comment in cache.hh.
583 template<class TagStore>
584 PacketPtr
585 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
586 bool needsExclusive)
587 {
588 bool blkValid = blk && blk->isValid();
589
590 if (cpu_pkt->req->isUncacheable()) {
591 //assert(blk == NULL);
592 return NULL;
593 }
594
595 if (!blkValid &&
596 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
597 // Writebacks that weren't allocated in access() and upgrades
598 // from upper-level caches that missed completely just go
599 // through.
600 return NULL;
601 }
602
603 assert(cpu_pkt->needsResponse());
604
605 MemCmd cmd;
606 // @TODO make useUpgrades a parameter.
607 // Note that ownership protocols require upgrade, otherwise a
608 // write miss on a shared owned block will generate a ReadExcl,
609 // which will clobber the owned copy.
610 const bool useUpgrades = true;
611 if (blkValid && useUpgrades) {
612 // only reason to be here is that blk is shared
613 // (read-only) and we need exclusive
614 assert(needsExclusive && !blk->isWritable());
615 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
616 } else {
617 // block is invalid
618 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
619 }
620 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
621
622 pkt->allocate();
623 return pkt;
624 }
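
/*
 * A hypothetical reduction (not the gem5 MemCmd interface) of the
 * bus-command choice made above, with an illustrative enum standing in
 * for the real commands.
 */
enum ExampleBusCmd { EX_FORWARD, EX_UPGRADE, EX_SC_UPGRADE,
                     EX_READ_EX, EX_READ };

static inline ExampleBusCmd
example_bus_cmd(bool uncacheable, bool blk_valid, bool wb_or_upgrade,
                bool needs_exclusive, bool is_llsc)
{
    if (uncacheable)
        return EX_FORWARD;                 // next level handles it directly
    if (!blk_valid && wb_or_upgrade)
        return EX_FORWARD;                 // pass writeback/upgrade through
    if (blk_valid)                         // shared copy, exclusivity needed
        return is_llsc ? EX_SC_UPGRADE : EX_UPGRADE;
    return needs_exclusive ? EX_READ_EX : EX_READ;
}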
625
626
627 template<class TagStore>
628 Tick
629 Cache<TagStore>::atomicAccess(PacketPtr pkt)
630 {
631 int lat = hitLatency;
632
633 // @TODO: make this a parameter
634 bool last_level_cache = false;
635
636 if (pkt->memInhibitAsserted()) {
637 assert(!pkt->req->isUncacheable());
638 // have to invalidate ourselves and any lower caches even if
639 // upper cache will be responding
640 if (pkt->isInvalidate()) {
641 BlkType *blk = tags->findBlock(pkt->getAddr());
642 if (blk && blk->isValid()) {
643 tags->invalidateBlk(blk);
644 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
645 pkt->cmdString(), pkt->getAddr());
646 }
647 if (!last_level_cache) {
648 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
649 pkt->cmdString(), pkt->getAddr());
650 lat += memSidePort->sendAtomic(pkt);
651 }
652 } else {
653 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
654 pkt->cmdString(), pkt->getAddr());
655 }
656
657 return lat;
658 }
659
660 // should assert here that there are no outstanding MSHRs or
661 // writebacks... that would mean that someone used an atomic
662 // access in timing mode
663
664 BlkType *blk = NULL;
665 PacketList writebacks;
666
667 if (!access(pkt, blk, lat, writebacks)) {
668 // MISS
669 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
670
671 bool is_forward = (bus_pkt == NULL);
672
673 if (is_forward) {
674 // just forwarding the same request to the next level
675 // no local cache operation involved
676 bus_pkt = pkt;
677 }
678
679 DPRINTF(Cache, "Sending an atomic %s for %x\n",
680 bus_pkt->cmdString(), bus_pkt->getAddr());
681
682 #if TRACING_ON
683 CacheBlk::State old_state = blk ? blk->status : 0;
684 #endif
685
686 lat += memSidePort->sendAtomic(bus_pkt);
687
688 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
689 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
690
691 assert(!bus_pkt->wasNacked());
692
693 // If packet was a forward, the response (if any) is already
694 // in place in the bus_pkt == pkt structure, so we don't need
695 // to do anything. Otherwise, use the separate bus_pkt to
696 // generate response to pkt and then delete it.
697 if (!is_forward) {
698 if (pkt->needsResponse()) {
699 assert(bus_pkt->isResponse());
700 if (bus_pkt->isError()) {
701 pkt->makeAtomicResponse();
702 pkt->copyError(bus_pkt);
703 } else if (bus_pkt->isRead() ||
704 bus_pkt->cmd == MemCmd::UpgradeResp) {
705 // we're updating cache state to allow us to
706 // satisfy the upstream request from the cache
707 blk = handleFill(bus_pkt, blk, writebacks);
708 satisfyCpuSideRequest(pkt, blk);
709 } else {
710 // we're satisfying the upstream request without
711 // modifying cache state, e.g., a write-through
712 pkt->makeAtomicResponse();
713 }
714 }
715 delete bus_pkt;
716 }
717 }
718
719 // Note that we don't invoke the prefetcher at all in atomic mode.
720 // It's not clear how to do it properly, particularly for
721 // prefetchers that aggressively generate prefetch candidates and
722 // rely on bandwidth contention to throttle them; these will tend
723 // to pollute the cache in atomic mode since there is no bandwidth
724 // contention. If we ever do want to enable prefetching in atomic
725 // mode, though, this is the place to do it... see timingAccess()
726 // for an example (though we'd want to issue the prefetch(es)
727 // immediately rather than calling requestMemSideBus() as we do
728 // there).
729
730 // Handle writebacks if needed
731 while (!writebacks.empty()){
732 PacketPtr wbPkt = writebacks.front();
733 memSidePort->sendAtomic(wbPkt);
734 writebacks.pop_front();
735 delete wbPkt;
736 }
737
738 // We now have the block one way or another (hit or completed miss)
739
740 if (pkt->needsResponse()) {
741 pkt->makeAtomicResponse();
742 }
743
744 return lat;
745 }
746
747
748 template<class TagStore>
749 void
750 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
751 {
752 Addr blk_addr = blockAlign(pkt->getAddr());
753 BlkType *blk = tags->findBlock(pkt->getAddr());
754 MSHR *mshr = mshrQueue.findMatch(blk_addr);
755
756 pkt->pushLabel(name());
757
758 CacheBlkPrintWrapper cbpw(blk);
759
760 // Note that just because an L2/L3 has valid data doesn't mean an
761 // L1 doesn't have a more up-to-date modified copy that still
762 // needs to be found. As a result we always update the request if
763 // we have it, but only declare it satisfied if we are the owner.
764
765 // see if we have data at all (owned or otherwise)
766 bool have_data = blk && blk->isValid()
767 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
768
769 // data we have is dirty if marked as such or if valid & ownership
770 // pending due to outstanding UpgradeReq
771 bool have_dirty =
772 have_data && (blk->isDirty() ||
773 (mshr && mshr->inService && mshr->isPendingDirty()));
774
775 bool done = have_dirty
776 || cpuSidePort->checkFunctional(pkt)
777 || mshrQueue.checkFunctional(pkt, blk_addr)
778 || writeBuffer.checkFunctional(pkt, blk_addr)
779 || memSidePort->checkFunctional(pkt);
780
781 DPRINTF(Cache, "functional %s %x %s%s%s\n",
782 pkt->cmdString(), pkt->getAddr(),
783 (blk && blk->isValid()) ? "valid " : "",
784 have_data ? "data " : "", done ? "done " : "");
785
786 // We're leaving the cache, so pop cache->name() label
787 pkt->popLabel();
788
789 if (done) {
790 pkt->makeResponse();
791 } else {
792 // if it came as a request from the CPU side then make sure it
793 // continues towards the memory side
794 if (fromCpuSide) {
795 memSidePort->sendFunctional(pkt);
796 } else if (forwardSnoops) {
797 // if it came from the memory side, it must be a snoop request
798 // and we should only forward it if we are forwarding snoops
799 cpuSidePort->sendFunctional(pkt);
800 }
801 }
802 }
803
804
805 /////////////////////////////////////////////////////
806 //
807 // Response handling: responses from the memory side
808 //
809 /////////////////////////////////////////////////////
810
811
812 template<class TagStore>
813 void
814 Cache<TagStore>::handleResponse(PacketPtr pkt)
815 {
816 Tick time = curTick() + hitLatency;
817 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
818 bool is_error = pkt->isError();
819
820 assert(mshr);
821
822 if (pkt->wasNacked()) {
823 //pkt->reinitFromRequest();
824 warn("NACKs from devices not connected to the same bus "
825 "not implemented\n");
826 return;
827 }
828 if (is_error) {
829 DPRINTF(Cache, "Cache received packet with error for address %x, "
830 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
831 }
832
833 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
834
835 MSHRQueue *mq = mshr->queue;
836 bool wasFull = mq->isFull();
837
838 if (mshr == noTargetMSHR) {
839 // we always clear at least one target
840 clearBlocked(Blocked_NoTargets);
841 noTargetMSHR = NULL;
842 }
843
844 // Initial target is used just for stats
845 MSHR::Target *initial_tgt = mshr->getTarget();
846 BlkType *blk = tags->findBlock(pkt->getAddr());
847 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
848 Tick miss_latency = curTick() - initial_tgt->recvTime;
849 PacketList writebacks;
850
851 if (pkt->req->isUncacheable()) {
852 mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
853 miss_latency;
854 } else {
855 mshr_miss_latency[stats_cmd_idx][0/*pkt->req->threadId()*/] +=
856 miss_latency;
857 }
858
859 bool is_fill = !mshr->isForward &&
860 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
861
862 if (is_fill && !is_error) {
863 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
864 pkt->getAddr());
865
866 // give mshr a chance to do some dirty work
867 mshr->handleFill(pkt, blk);
868
869 blk = handleFill(pkt, blk, writebacks);
870 assert(blk != NULL);
871 }
872
873 // First offset for critical word first calculations
874 int initial_offset = 0;
875
876 if (mshr->hasTargets()) {
877 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
878 }
879
880 while (mshr->hasTargets()) {
881 MSHR::Target *target = mshr->getTarget();
882
883 switch (target->source) {
884 case MSHR::Target::FromCPU:
885 Tick completion_time;
886 if (is_fill) {
887 satisfyCpuSideRequest(target->pkt, blk,
888 true, mshr->hasPostDowngrade());
889 // How many bytes past the first request is this one
890 int transfer_offset =
891 target->pkt->getOffset(blkSize) - initial_offset;
892 if (transfer_offset < 0) {
893 transfer_offset += blkSize;
894 }
895
896 // If critical word (no offset) return first word time
897 completion_time = tags->getHitLatency() +
898 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
899
900 assert(!target->pkt->req->isUncacheable());
901 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
902 completion_time - target->recvTime;
903 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
904 // failed StoreCond upgrade
905 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
906 target->pkt->cmd == MemCmd::StoreCondFailReq ||
907 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
908 completion_time = tags->getHitLatency() + pkt->finishTime;
909 target->pkt->req->setExtraData(0);
910 } else {
911 // not a cache fill, just forwarding response
912 completion_time = tags->getHitLatency() + pkt->finishTime;
913 if (pkt->isRead() && !is_error) {
914 target->pkt->setData(pkt->getPtr<uint8_t>());
915 }
916 }
917 target->pkt->makeTimingResponse();
918 // if this packet is an error copy that to the new packet
919 if (is_error)
920 target->pkt->copyError(pkt);
921 if (target->pkt->cmd == MemCmd::ReadResp &&
922 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
923 // If intermediate cache got ReadRespWithInvalidate,
924 // propagate that. Response should not have
925 // isInvalidate() set otherwise.
926 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
927 }
928 cpuSidePort->respond(target->pkt, completion_time);
929 break;
930
931 case MSHR::Target::FromPrefetcher:
932 assert(target->pkt->cmd == MemCmd::HardPFReq);
933 if (blk)
934 blk->status |= BlkHWPrefetched;
935 delete target->pkt->req;
936 delete target->pkt;
937 break;
938
939 case MSHR::Target::FromSnoop:
940 // I don't believe that a snoop can be in an error state
941 assert(!is_error);
942 // response to snoop request
943 DPRINTF(Cache, "processing deferred snoop...\n");
944 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
945 handleSnoop(target->pkt, blk, true, true,
946 mshr->hasPostInvalidate());
947 break;
948
949 default:
950 panic("Illegal target->source enum %d\n", target->source);
951 }
952
953 mshr->popTarget();
954 }
955
956 if (blk) {
957 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
958 tags->invalidateBlk(blk);
959 } else if (mshr->hasPostDowngrade()) {
960 blk->status &= ~BlkWritable;
961 }
962 }
963
964 if (mshr->promoteDeferredTargets()) {
965 // avoid later read getting stale data while write miss is
966 // outstanding.. see comment in timingAccess()
967 if (blk) {
968 blk->status &= ~BlkReadable;
969 }
970 MSHRQueue *mq = mshr->queue;
971 mq->markPending(mshr);
972 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
973 } else {
974 mq->deallocate(mshr);
975 if (wasFull && !mq->isFull()) {
976 clearBlocked((BlockedCause)mq->index);
977 }
978 }
979
980 // copy writebacks to write buffer
981 while (!writebacks.empty()) {
982 PacketPtr wbPkt = writebacks.front();
983 allocateWriteBuffer(wbPkt, time, true);
984 writebacks.pop_front();
985 }
986 // if we used temp block, clear it out
987 if (blk == tempBlock) {
988 if (blk->isDirty()) {
989 allocateWriteBuffer(writebackBlk(blk), time, true);
990 }
991 tags->invalidateBlk(blk);
992 }
993
994 delete pkt;
995 }
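
/*
 * A hedged sketch of the critical-word-first accounting used above: a
 * target that asked for the word that arrives first is charged the
 * first-word time, any other target waits for the whole block, and the
 * tag lookup latency is added in either case.  ExampleTick and the
 * parameter names are illustrative only.
 */
typedef unsigned long long ExampleTick;

static inline ExampleTick
example_completion_time(ExampleTick hit_latency, ExampleTick first_word_time,
                        ExampleTick finish_time, int target_offset,
                        int initial_offset, int blk_size)
{
    int transfer_offset = target_offset - initial_offset;
    if (transfer_offset < 0)
        transfer_offset += blk_size;       // offsets wrap within the block
    return hit_latency +
        (transfer_offset ? finish_time : first_word_time);
}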
996
997
998
999
1000 template<class TagStore>
1001 PacketPtr
1002 Cache<TagStore>::writebackBlk(BlkType *blk)
1003 {
1004 assert(blk && blk->isValid() && blk->isDirty());
1005
1006 writebacks[0/*pkt->req->threadId()*/]++;
1007
1008 Request *writebackReq =
1009 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1010 Request::wbMasterId);
1011 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
1012 if (blk->isWritable()) {
1013 writeback->setSupplyExclusive();
1014 }
1015 writeback->allocate();
1016 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1017
1018 blk->status &= ~BlkDirty;
1019 return writeback;
1020 }
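
/*
 * A standalone sketch (not the gem5 Request interface) of what the master
 * ID added to each request object enables: a writeback generated here is
 * tagged with the cache's own writeback master ID, so downstream
 * components can attribute traffic per master.  All names below are
 * hypothetical.
 */
struct ExampleRequest {
    unsigned masterId;                     // who created this request
    explicit ExampleRequest(unsigned id) : masterId(id) {}
};

static const unsigned EXAMPLE_MAX_MASTERS = 16;
static unsigned long long example_wb_bytes[EXAMPLE_MAX_MASTERS];

static inline void
example_record_writeback(const ExampleRequest &req, unsigned blk_size)
{
    // attribute the writeback to the master carried in the request
    if (req.masterId < EXAMPLE_MAX_MASTERS)
        example_wb_bytes[req.masterId] += blk_size;
}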
1021
1022
1023 template<class TagStore>
1024 typename Cache<TagStore>::BlkType*
1025 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1026 {
1027 BlkType *blk = tags->findVictim(addr, writebacks);
1028
1029 if (blk->isValid()) {
1030 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1031 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1032 if (repl_mshr) {
1033 // must be an outstanding upgrade request on block
1034 // we're about to replace...
1035 assert(!blk->isWritable());
1036 assert(repl_mshr->needsExclusive());
1037 // too hard to replace block with transient state
1038 // allocation failed, block not inserted
1039 return NULL;
1040 } else {
1041 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1042 repl_addr, addr,
1043 blk->isDirty() ? "writeback" : "clean");
1044
1045 if (blk->isDirty()) {
1046 // Save writeback packet for handling by caller
1047 writebacks.push_back(writebackBlk(blk));
1048 }
1049 }
1050 }
1051
1052 return blk;
1053 }
1054
1055
1056 // Note that the reason we return a list of writebacks rather than
1057 // inserting them directly in the write buffer is that this function
1058 // is called by both atomic and timing-mode accesses, and in atomic
1059 // mode we don't mess with the write buffer (we just perform the
1060 // writebacks atomically once the original request is complete).
1061 template<class TagStore>
1062 typename Cache<TagStore>::BlkType*
1063 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1064 PacketList &writebacks)
1065 {
1066 Addr addr = pkt->getAddr();
1067 #if TRACING_ON
1068 CacheBlk::State old_state = blk ? blk->status : 0;
1069 #endif
1070
1071 if (blk == NULL) {
1072 // better have read new data...
1073 assert(pkt->hasData());
1074 // need to do a replacement
1075 blk = allocateBlock(addr, writebacks);
1076 if (blk == NULL) {
1077 // No replaceable block... just use temporary storage to
1078 // complete the current request and then get rid of it
1079 assert(!tempBlock->isValid());
1080 blk = tempBlock;
1081 tempBlock->set = tags->extractSet(addr);
1082 tempBlock->tag = tags->extractTag(addr);
1083 DPRINTF(Cache, "using temp block for %x\n", addr);
1084 } else {
1085 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
1086 tags->insertBlock(pkt->getAddr(), blk, id);
1087 }
1088
1089 // starting from scratch with a new block
1090 blk->status = 0;
1091 } else {
1092 // existing block... probably an upgrade
1093 assert(blk->tag == tags->extractTag(addr));
1094 // either we're getting new data or the block should already be valid
1095 assert(pkt->hasData() || blk->isValid());
1096 // don't clear block status... if block is already dirty we
1097 // don't want to lose that
1098 }
1099
1100 blk->status |= BlkValid | BlkReadable;
1101
1102 if (!pkt->sharedAsserted()) {
1103 blk->status |= BlkWritable;
1104 // If we got this via cache-to-cache transfer (i.e., from a
1105 // cache that was an owner) and took away that owner's copy,
1106 // then we need to write it back. Normally this happens
1107 // anyway as a side effect of getting a copy to write it, but
1108 // there are cases (such as failed store conditionals or
1109 // compare-and-swaps) where we'll demand an exclusive copy but
1110 // end up not writing it.
1111 if (pkt->memInhibitAsserted())
1112 blk->status |= BlkDirty;
1113 }
1114
1115 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1116 addr, old_state, blk->status);
1117
1118 // if we got new data, copy it in
1119 if (pkt->isRead()) {
1120 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1121 }
1122
1123 blk->whenReady = pkt->finishTime;
1124
1125 return blk;
1126 }
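
/*
 * A hedged sketch of the fill-state calculation above, using local bit
 * masks rather than gem5's CacheBlk status flags; every name here is
 * illustrative.
 */
static const unsigned EX_BLK_VALID    = 0x01;
static const unsigned EX_BLK_READABLE = 0x02;
static const unsigned EX_BLK_WRITABLE = 0x04;
static const unsigned EX_BLK_DIRTY    = 0x08;

static inline unsigned
example_fill_status(unsigned old_status, bool shared_asserted,
                    bool mem_inhibit_asserted)
{
    unsigned status = old_status | EX_BLK_VALID | EX_BLK_READABLE;
    if (!shared_asserted) {
        status |= EX_BLK_WRITABLE;         // nobody else has a copy
        if (mem_inhibit_asserted)
            status |= EX_BLK_DIRTY;        // data came from a previous owner
    }
    return status;
}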
1127
1128
1129 /////////////////////////////////////////////////////
1130 //
1131 // Snoop path: requests coming in from the memory side
1132 //
1133 /////////////////////////////////////////////////////
1134
1135 template<class TagStore>
1136 void
1137 Cache<TagStore>::
1138 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1139 bool already_copied, bool pending_inval)
1140 {
1141 // timing-mode snoop responses require a new packet, unless we
1142 // already made a copy...
1143 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1144 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1145 pkt->allocate();
1146 pkt->makeTimingResponse();
1147 if (pkt->isRead()) {
1148 pkt->setDataFromBlock(blk_data, blkSize);
1149 }
1150 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1151 // Assume we defer a response to a read from a far-away cache
1152 // A, then later defer a ReadExcl from a cache B on the same
1153 // bus as us. We'll assert MemInhibit in both cases, but in
1154 // the latter case MemInhibit will keep the invalidation from
1155 // reaching cache A. This special response tells cache A that
1156 // it gets the block to satisfy its read, but must immediately
1157 // invalidate it.
1158 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1159 }
1160 memSidePort->respond(pkt, curTick() + hitLatency);
1161 }
1162
1163 template<class TagStore>
1164 void
1165 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1166 bool is_timing, bool is_deferred,
1167 bool pending_inval)
1168 {
1169 // deferred snoops can only happen in timing mode
1170 assert(!(is_deferred && !is_timing));
1171 // pending_inval only makes sense on deferred snoops
1172 assert(!(pending_inval && !is_deferred));
1173 assert(pkt->isRequest());
1174
1175 // the packet may get modified if we or a forwarded snooper
1176 // responds in atomic mode, so remember a few things about the
1177 // original packet up front
1178 bool invalidate = pkt->isInvalidate();
1179 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1180
1181 if (forwardSnoops) {
1182 // first propagate snoop upward to see if anyone above us wants to
1183 // handle it. save & restore packet src since it will get
1184 // rewritten to be relative to cpu-side bus (if any)
1185 bool alreadyResponded = pkt->memInhibitAsserted();
1186 if (is_timing) {
1187 Packet *snoopPkt = new Packet(pkt, true); // clear flags
1188 snoopPkt->setExpressSnoop();
1189 snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
1190 cpuSidePort->sendTiming(snoopPkt);
1191 if (snoopPkt->memInhibitAsserted()) {
1192 // cache-to-cache response from some upper cache
1193 assert(!alreadyResponded);
1194 pkt->assertMemInhibit();
1195 } else {
1196 delete snoopPkt->senderState;
1197 }
1198 if (snoopPkt->sharedAsserted()) {
1199 pkt->assertShared();
1200 }
1201 delete snoopPkt;
1202 } else {
1203 int origSrc = pkt->getSrc();
1204 cpuSidePort->sendAtomic(pkt);
1205 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1206 // cache-to-cache response from some upper cache:
1207 // forward response to original requester
1208 assert(pkt->isResponse());
1209 }
1210 pkt->setSrc(origSrc);
1211 }
1212 }
1213
1214 if (!blk || !blk->isValid()) {
1215 return;
1216 }
1217
1218 // we may end up modifying both the block state and the packet (if
1219 // we respond in atomic mode), so just figure out what to do now
1220 // and then do it later
1221 bool respond = blk->isDirty() && pkt->needsResponse();
1222 bool have_exclusive = blk->isWritable();
1223
1224 if (pkt->isRead() && !invalidate) {
1225 assert(!needs_exclusive);
1226 pkt->assertShared();
1227 int bits_to_clear = BlkWritable;
1228 const bool haveOwnershipState = true; // for now
1229 if (!haveOwnershipState) {
1230 // if we don't support pure ownership (dirty && !writable),
1231 // have to clear dirty bit here, assume memory snarfs data
1232 // on cache-to-cache xfer
1233 bits_to_clear |= BlkDirty;
1234 }
1235 blk->status &= ~bits_to_clear;
1236 }
1237
1238 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1239 pkt->cmdString(), blockAlign(pkt->getAddr()),
1240 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1241
1242 if (respond) {
1243 assert(!pkt->memInhibitAsserted());
1244 pkt->assertMemInhibit();
1245 if (have_exclusive) {
1246 pkt->setSupplyExclusive();
1247 }
1248 if (is_timing) {
1249 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1250 } else {
1251 pkt->makeAtomicResponse();
1252 pkt->setDataFromBlock(blk->data, blkSize);
1253 }
1254 } else if (is_timing && is_deferred) {
1255 // if it's a deferred timing snoop then we've made a copy of
1256 // the packet, and so if we're not using that copy to respond
1257 // then we need to delete it here.
1258 delete pkt;
1259 }
1260
1261 // Do this last in case it deallocates block data or something
1262 // like that
1263 if (invalidate) {
1264 tags->invalidateBlk(blk);
1265 }
1266 }
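
/*
 * An illustrative reduction (not gem5 code) of the per-block snoop
 * decision made above: respond only when we hold the dirty copy and a
 * response is required, keep the requester from gaining exclusivity on a
 * non-invalidating read, and drop the block on an invalidation.
 */
struct ExampleSnoopOutcome {
    bool respond;                          // supply the data ourselves
    bool assert_shared;                    // demote both copies to shared
    bool invalidate;                       // drop our copy afterwards
};

static inline ExampleSnoopOutcome
example_snoop(bool blk_valid, bool blk_dirty, bool needs_response,
              bool is_read, bool invalidates)
{
    ExampleSnoopOutcome out = { false, false, false };
    if (!blk_valid)
        return out;                        // nothing to do
    out.respond = blk_dirty && needs_response;
    out.assert_shared = is_read && !invalidates;
    out.invalidate = invalidates;
    return out;
}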
1267
1268
1269 template<class TagStore>
1270 void
1271 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1272 {
1273 // Note that some deferred snoops don't have requests, since the
1274 // original access may have already completed
1275 if ((pkt->req && pkt->req->isUncacheable()) ||
1276 pkt->cmd == MemCmd::Writeback) {
1277 //Can't get a hit on an uncacheable address
1278 //Revisit this for multi level coherence
1279 return;
1280 }
1281
1282 BlkType *blk = tags->findBlock(pkt->getAddr());
1283
1284 Addr blk_addr = blockAlign(pkt->getAddr());
1285 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1286
1287 // Let the MSHR itself track the snoop and decide whether we want
1288 // to go ahead and do the regular cache snoop
1289 if (mshr && mshr->handleSnoop(pkt, order++)) {
1290 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1291 blk_addr);
1292 if (mshr->getNumTargets() > numTarget)
1293 warn("allocating bonus target for snoop"); //handle later
1294 return;
1295 }
1296
1297 //We also need to check the writeback buffers and handle those
1298 std::vector<MSHR *> writebacks;
1299 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1300 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1301 pkt->getAddr());
1302
1303 //Look through writebacks for any cacheable writes, use that
1304 for (int i = 0; i < writebacks.size(); i++) {
1305 mshr = writebacks[i];
1306 assert(!mshr->isUncacheable());
1307 assert(mshr->getNumTargets() == 1);
1308 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1309 assert(wb_pkt->cmd == MemCmd::Writeback);
1310
1311 assert(!pkt->memInhibitAsserted());
1312 pkt->assertMemInhibit();
1313 if (!pkt->needsExclusive()) {
1314 pkt->assertShared();
1315 // the writeback is no longer the exclusive copy in the system
1316 wb_pkt->clearSupplyExclusive();
1317 } else {
1318 // if we're not asserting the shared line, we need to
1319 // invalidate our copy. we'll do that below as long as
1320 // the packet's invalidate flag is set...
1321 assert(pkt->isInvalidate());
1322 }
1323 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1324 false, false);
1325
1326 if (pkt->isInvalidate()) {
1327 // Invalidation trumps our writeback... discard here
1328 markInService(mshr);
1329 delete wb_pkt;
1330 }
1331
1332 // If this was a shared writeback, there may still be
1333 // other shared copies above that require invalidation.
1334 // We could be more selective and return here if the
1335 // request is non-exclusive or if the writeback is
1336 // exclusive.
1337 break;
1338 }
1339 }
1340
1341 handleSnoop(pkt, blk, true, false, false);
1342 }
1343
1344
1345 template<class TagStore>
1346 Tick
1347 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1348 {
1349 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1350 // Can't get a hit on an uncacheable address
1351 // Revisit this for multi level coherence
1352 return hitLatency;
1353 }
1354
1355 BlkType *blk = tags->findBlock(pkt->getAddr());
1356 handleSnoop(pkt, blk, false, false, false);
1357 return hitLatency;
1358 }
1359
1360
1361 template<class TagStore>
1362 MSHR *
1363 Cache<TagStore>::getNextMSHR()
1364 {
1365 // Check both MSHR queue and write buffer for potential requests
1366 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1367 MSHR *write_mshr = writeBuffer.getNextMSHR();
1368
1369 // Now figure out which one to send... some cases are easy
1370 if (miss_mshr && !write_mshr) {
1371 return miss_mshr;
1372 }
1373 if (write_mshr && !miss_mshr) {
1374 return write_mshr;
1375 }
1376
1377 if (miss_mshr && write_mshr) {
1378 // We have one of each... normally we favor the miss request
1379 // unless the write buffer is full
1380 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1381 // Write buffer is full, so we'd like to issue a write;
1382 // need to search MSHR queue for conflicting earlier miss.
1383 MSHR *conflict_mshr =
1384 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1385
1386 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1387 // Service misses in order until conflict is cleared.
1388 return conflict_mshr;
1389 }
1390
1391 // No conflicts; issue write
1392 return write_mshr;
1393 }
1394
1395 // Write buffer isn't full, but need to check it for
1396 // conflicting earlier writeback
1397 MSHR *conflict_mshr =
1398 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1399 if (conflict_mshr) {
1400 // not sure why we don't check order here... it was in the
1401 // original code but commented out.
1402
1403 // The only way this happens is if we are
1404 // doing a write and we didn't have permissions
1405 // then subsequently saw a writeback (owned got evicted)
1406 // We need to make sure to perform the writeback first
1407 // To preserve the dirty data, then we can issue the write
1408
1409 // should we return write_mshr here instead? I.e. do we
1410 // have to flush writes in order? I don't think so... not
1411 // for Alpha anyway. Maybe for x86?
1412 return conflict_mshr;
1413 }
1414
1415 // No conflicts; issue read
1416 return miss_mshr;
1417 }
1418
1419 // fall through... no pending requests. Try a prefetch.
1420 assert(!miss_mshr && !write_mshr);
1421 if (prefetcher && !mshrQueue.isFull()) {
1422 // If we have a miss queue slot, we can try a prefetch
1423 PacketPtr pkt = prefetcher->getPacket();
1424 if (pkt) {
1425 Addr pf_addr = blockAlign(pkt->getAddr());
1426 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1427 !writeBuffer.findMatch(pf_addr)) {
1428 // Update statistic on number of prefetches issued
1429 // (hwpf_mshr_misses)
1430 mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
1431 // Don't request bus, since we already have it
1432 return allocateMissBuffer(pkt, curTick(), false);
1433 } else {
1434 // free the request and packet
1435 delete pkt->req;
1436 delete pkt;
1437 }
1438 }
1439 }
1440
1441 return NULL;
1442 }
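
/*
 * A hypothetical distillation of the arbitration above between the miss
 * queue and the write buffer.  The enum and flags are illustrative; the
 * real code works on MSHR pointers and allocation order.
 */
enum ExampleChoice { EX_NONE, EX_MISS, EX_WRITE,
                     EX_CONFLICT_MISS, EX_CONFLICT_WRITE };

static inline ExampleChoice
example_arbitrate(bool have_miss, bool have_write, bool wb_full_and_idle,
                  bool older_conflicting_miss, bool conflicting_writeback)
{
    if (have_miss && !have_write)
        return EX_MISS;
    if (have_write && !have_miss)
        return EX_WRITE;
    if (!have_miss && !have_write)
        return EX_NONE;                    // fall through to a prefetch
    if (wb_full_and_idle)
        // need to drain a write, but an older conflicting miss goes first
        return older_conflicting_miss ? EX_CONFLICT_MISS : EX_WRITE;
    // favor the miss, unless a conflicting writeback must be drained first
    return conflicting_writeback ? EX_CONFLICT_WRITE : EX_MISS;
}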
1443
1444
1445 template<class TagStore>
1446 PacketPtr
1447 Cache<TagStore>::getTimingPacket()
1448 {
1449 MSHR *mshr = getNextMSHR();
1450
1451 if (mshr == NULL) {
1452 return NULL;
1453 }
1454
1455 // use request from 1st target
1456 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1457 PacketPtr pkt = NULL;
1458
1459 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1460 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1461 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1462 // in MSHR, so now that we are getting around to processing
1463 // it, just treat it as if we got a failure response
1464 pkt = new Packet(tgt_pkt);
1465 pkt->cmd = MemCmd::UpgradeFailResp;
1466 pkt->senderState = mshr;
1467 pkt->firstWordTime = pkt->finishTime = curTick();
1468 handleResponse(pkt);
1469 return NULL;
1470 } else if (mshr->isForwardNoResponse()) {
1471 // no response expected, just forward packet as it is
1472 assert(tags->findBlock(mshr->addr) == NULL);
1473 pkt = tgt_pkt;
1474 } else {
1475 BlkType *blk = tags->findBlock(mshr->addr);
1476
1477 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1478 // It might be possible for a writeback to arrive between
1479 // the time the prefetch is placed in the MSHRs and when
1480 // it's selected to send... if so, this assert will catch
1481 // that, and then we'll have to figure out what to do.
1482 assert(blk == NULL);
1483
1484 // We need to check the caches above us to verify that they don't have
1485 // a copy of this block in the dirty state at the moment. Without this
1486 // check we could get a stale copy from memory that might get used
1487 // in place of the dirty one.
1488 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1489 snoop_pkt->setExpressSnoop();
1490 snoop_pkt->senderState = mshr;
1491 cpuSidePort->sendTiming(snoop_pkt);
1492
1493 if (snoop_pkt->memInhibitAsserted()) {
1494 markInService(mshr, snoop_pkt);
1495 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1496 tgt_pkt->getAddr());
1497 delete snoop_pkt;
1498 return NULL;
1499 }
1500 delete snoop_pkt;
1501 }
1502
1503 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1504
1505 mshr->isForward = (pkt == NULL);
1506
1507 if (mshr->isForward) {
1508 // not a cache block request, but a response is expected
1509 // make copy of current packet to forward, keep current
1510 // copy for response handling
1511 pkt = new Packet(tgt_pkt);
1512 pkt->allocate();
1513 if (pkt->isWrite()) {
1514 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1515 }
1516 }
1517 }
1518
1519 assert(pkt != NULL);
1520 pkt->senderState = mshr;
1521 return pkt;
1522 }
1523
1524
1525 template<class TagStore>
1526 Tick
1527 Cache<TagStore>::nextMSHRReadyTime()
1528 {
1529 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1530 writeBuffer.nextMSHRReadyTime());
1531
1532 if (prefetcher) {
1533 nextReady = std::min(nextReady,
1534 prefetcher->nextPrefetchReadyTime());
1535 }
1536
1537 return nextReady;
1538 }
1539
1540
1541 ///////////////
1542 //
1543 // CpuSidePort
1544 //
1545 ///////////////
1546
1547 template<class TagStore>
1548 AddrRangeList
1549 Cache<TagStore>::CpuSidePort::
1550 getAddrRanges()
1551 {
1552 // CPU side port doesn't snoop; it's a target only. It can
1553 // potentially respond to any address.
1554 AddrRangeList ranges;
1555 ranges.push_back(myCache()->getAddrRange());
1556 return ranges;
1557 }
1558
1559
1560 template<class TagStore>
1561 bool
1562 Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
1563 {
1564 // illegal to block responses... can lead to deadlock
1565 if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
1566 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1567 mustSendRetry = true;
1568 return false;
1569 }
1570
1571 myCache()->timingAccess(pkt);
1572 return true;
1573 }
1574
1575
1576 template<class TagStore>
1577 Tick
1578 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1579 {
1580 return myCache()->atomicAccess(pkt);
1581 }
1582
1583
1584 template<class TagStore>
1585 void
1586 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1587 {
1588 myCache()->functionalAccess(pkt, true);
1589 }
1590
1591
1592 template<class TagStore>
1593 Cache<TagStore>::
1594 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1595 const std::string &_label)
1596 : BaseCache::CachePort(_name, _cache, _label)
1597 {
1598 }
1599
1600 ///////////////
1601 //
1602 // MemSidePort
1603 //
1604 ///////////////
1605
1606 template<class TagStore>
1607 bool
1608 Cache<TagStore>::MemSidePort::isSnooping()
1609 {
1610 // Memory-side port always snoops, but never passes requests
1611 // through to targets on the cpu side (so we don't add anything to
1612 // the address range list).
1613 return true;
1614 }
1615
1616
1617 template<class TagStore>
1618 bool
1619 Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
1620 {
1621 // this needs to be fixed so that the cache updates the mshr and sends the
1622 // packet back out on the link; that probably won't happen soon, so until
1623 // it is fixed, just panic when a nacked packet shows up
1624 if (pkt->wasNacked())
1625 panic("Need to implement cache resending nacked packets!\n");
1626
1627 if (pkt->isRequest() && blocked) {
1628 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1629 mustSendRetry = true;
1630 return false;
1631 }
1632
1633 if (pkt->isResponse()) {
1634 myCache()->handleResponse(pkt);
1635 } else {
1636 myCache()->snoopTiming(pkt);
1637 }
1638 return true;
1639 }
1640
1641
1642 template<class TagStore>
1643 Tick
1644 Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
1645 {
1646 // in atomic mode, responses go back to the sender via the
1647 // function return from sendAtomic(), not via a separate
1648 // sendAtomic() from the responder. Thus we should never see a
1649 // response packet in recvAtomic() (anywhere, not just here).
1650 assert(!pkt->isResponse());
1651 return myCache()->snoopAtomic(pkt);
1652 }
1653
1654
1655 template<class TagStore>
1656 void
1657 Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
1658 {
1659 myCache()->functionalAccess(pkt, false);
1660 }
1661
1662
1663
1664 template<class TagStore>
1665 void
1666 Cache<TagStore>::MemSidePort::sendPacket()
1667 {
1668 // if we have responses that are ready, they take precedence
1669 if (deferredPacketReady()) {
1670 bool success = sendTiming(transmitList.front().pkt);
1671
1672 if (success) {
1673 //send successful, remove packet
1674 transmitList.pop_front();
1675 }
1676
1677 waitingOnRetry = !success;
1678 } else {
1679 // check for non-response packets (requests & writebacks)
1680 PacketPtr pkt = myCache()->getTimingPacket();
1681 if (pkt == NULL) {
1682 // can happen if e.g. we attempt a writeback and fail, but
1683 // before the retry, the writeback is eliminated because
1684 // we snoop another cache's ReadEx.
1685 waitingOnRetry = false;
1686 } else {
1687 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1688
1689 bool success = sendTiming(pkt);
1690
1691 waitingOnRetry = !success;
1692 if (waitingOnRetry) {
1693 DPRINTF(CachePort, "now waiting on a retry\n");
1694 if (!mshr->isForwardNoResponse()) {
1695 delete pkt;
1696 }
1697 } else {
1698 myCache()->markInService(mshr, pkt);
1699 }
1700 }
1701 }
1702
1703
1704 // tried to send packet... if it was successful (no retry), see if
1705 // we need to rerequest bus or not
1706 if (!waitingOnRetry) {
1707 Tick nextReady = std::min(deferredPacketReadyTime(),
1708 myCache()->nextMSHRReadyTime());
1709 // @TODO: need to factor in prefetch requests here somehow
1710 if (nextReady != MaxTick) {
1711 DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
1712 cache->schedule(sendEvent, std::max(nextReady, curTick() + 1));
1713 } else {
1714 // no more to send right now: if we're draining, we may be done
1715 if (drainEvent && !sendEvent->scheduled()) {
1716 drainEvent->process();
1717 drainEvent = NULL;
1718 }
1719 }
1720 }
1721 }
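
/*
 * A minimal sketch (hypothetical names, not the gem5 port interface) of
 * the send/retry handshake this port implements: a failed send leaves the
 * port waiting, the peer's retry callback re-attempts the send, and a
 * successful send is followed by scheduling the next one.
 */
struct ExamplePort {
    bool waitingOnRetry;
    bool peerBusy;                         // stands in for a rejected send

    ExamplePort() : waitingOnRetry(false), peerBusy(false) {}

    bool trySend() { return !peerBusy; }   // stand-in for sendTiming()
    void scheduleNextSend() { /* e.g. at nextMSHRReadyTime() */ }

    void sendNext()
    {
        bool success = trySend();
        waitingOnRetry = !success;         // remember that a retry is due
        if (!waitingOnRetry)
            scheduleNextSend();            // more packets, or finish drain
    }

    void recvRetry()                       // peer can accept packets again
    {
        sendNext();                        // mirrors recvRetry() above
    }
};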
1722
1723 template<class TagStore>
1724 void
1725 Cache<TagStore>::MemSidePort::recvRetry()
1726 {
1727 assert(waitingOnRetry);
1728 sendPacket();
1729 }
1730
1731
1732 template<class TagStore>
1733 void
1734 Cache<TagStore>::MemSidePort::processSendEvent()
1735 {
1736 assert(!waitingOnRetry);
1737 sendPacket();
1738 }
1739
1740
1741 template<class TagStore>
1742 Cache<TagStore>::
1743 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1744 const std::string &_label)
1745 : BaseCache::CachePort(_name, _cache, _label)
1746 {
1747 // override default send event from SimpleTimingPort
1748 delete sendEvent;
1749 sendEvent = new SendEvent(this);
1750 }