src/mem/cache/cache_impl.hh
1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 * Andreas Sandberg
47 */
48
49 /**
50 * @file
51 * Cache definitions.
52 */
53
54 #include "base/misc.hh"
55 #include "base/types.hh"
56 #include "debug/Cache.hh"
57 #include "debug/CachePort.hh"
58 #include "mem/cache/prefetch/base.hh"
59 #include "mem/cache/blk.hh"
60 #include "mem/cache/cache.hh"
61 #include "mem/cache/mshr.hh"
62 #include "sim/sim_exit.hh"
63
64 template<class TagStore>
65 Cache<TagStore>::Cache(const Params *p, TagStore *tags)
66 : BaseCache(p),
67 tags(tags),
68 prefetcher(p->prefetcher),
69 doFastWrites(true),
70 prefetchOnAccess(p->prefetch_on_access)
71 {
72 tempBlock = new BlkType();
73 tempBlock->data = new uint8_t[blkSize];
74
75 cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
76 "CpuSidePort");
77 memSidePort = new MemSidePort(p->name + ".mem_side", this,
78 "MemSidePort");
79
80 tags->setCache(this);
81 if (prefetcher)
82 prefetcher->setCache(this);
83 }
84
85 template<class TagStore>
86 void
87 Cache<TagStore>::regStats()
88 {
89 BaseCache::regStats();
90 tags->regStats(name());
91 }
92
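// Perform an atomic Swap or conditional-swap (SwapReq) directly on the
// block: the packet's write data is kept as the potential new value, the
// current block contents are copied back into the packet, and the block is
// overwritten (and marked dirty) if the swap condition holds.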
93 template<class TagStore>
94 void
95 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
96 {
97 uint64_t overwrite_val;
98 bool overwrite_mem;
99 uint64_t condition_val64;
100 uint32_t condition_val32;
101
102 int offset = tags->extractBlkOffset(pkt->getAddr());
103 uint8_t *blk_data = blk->data + offset;
104
105 assert(sizeof(uint64_t) >= pkt->getSize());
106
107 overwrite_mem = true;
108 // keep a copy of our possible write value, and copy what is at the
109 // memory address into the packet
110 pkt->writeData((uint8_t *)&overwrite_val);
111 pkt->setData(blk_data);
112
113 if (pkt->req->isCondSwap()) {
114 if (pkt->getSize() == sizeof(uint64_t)) {
115 condition_val64 = pkt->req->getExtraData();
116 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
117 sizeof(uint64_t));
118 } else if (pkt->getSize() == sizeof(uint32_t)) {
119 condition_val32 = (uint32_t)pkt->req->getExtraData();
120 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
121 sizeof(uint32_t));
122 } else
123 panic("Invalid size for conditional read/write\n");
124 }
125
126 if (overwrite_mem) {
127 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
128 blk->status |= BlkDirty;
129 }
130 }
131
132
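// Satisfy a CPU-side request from a valid block: perform the read, write,
// or swap on the block data and set the coherence response flags (shared,
// mem-inhibit) or invalidate our copy based on the request type and the
// block's state.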
133 template<class TagStore>
134 void
135 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
136 bool deferred_response,
137 bool pending_downgrade)
138 {
139 assert(blk && blk->isValid());
140 // Occasionally this is not true... if we are a lower-level cache
141 // satisfying a string of Read and ReadEx requests from
142 // upper-level caches, a Read will mark the block as shared but we
143 // can satisfy a following ReadEx anyway since we can rely on the
144 // Read requester(s) to have buffered the ReadEx snoop and to
145 // invalidate their blocks after receiving them.
146 // assert(!pkt->needsExclusive() || blk->isWritable());
147 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
148
149 // Check RMW operations first since both isRead() and
150 // isWrite() will be true for them
151 if (pkt->cmd == MemCmd::SwapReq) {
152 cmpAndSwap(blk, pkt);
153 } else if (pkt->isWrite()) {
154 if (blk->checkWrite(pkt)) {
155 pkt->writeDataToBlock(blk->data, blkSize);
156 blk->status |= BlkDirty;
157 }
158 } else if (pkt->isRead()) {
159 if (pkt->isLLSC()) {
160 blk->trackLoadLocked(pkt);
161 }
162 pkt->setDataFromBlock(blk->data, blkSize);
163 if (pkt->getSize() == blkSize) {
164 // special handling for coherent block requests from
165 // upper-level caches
166 if (pkt->needsExclusive()) {
167 // if we have a dirty copy, make sure the recipient
168 // keeps it marked dirty
169 if (blk->isDirty()) {
170 pkt->assertMemInhibit();
171 }
172 // on ReadExReq we give up our copy unconditionally
173 assert(blk != tempBlock);
174 tags->invalidate(blk);
175 blk->invalidate();
176 } else if (blk->isWritable() && !pending_downgrade
177 && !pkt->sharedAsserted() && !pkt->req->isInstFetch()) {
178 // we can give the requester an exclusive copy (by not
179 // asserting shared line) on a read request if:
180 // - we have an exclusive copy at this level (& below)
181 // - we don't have a pending snoop from below
182 // signaling another read request
183 // - no other cache above has a copy (otherwise it
184 // would have asserted shared line on request)
185 // - we are not satisfying an instruction fetch (this
186 // prevents dirty data in the i-cache)
187
188 if (blk->isDirty()) {
189 // special considerations if we're owner:
190 if (!deferred_response && !isTopLevel) {
191 // if we are responding immediately and can
192 // signal that we're transferring ownership
193 // along with exclusivity, do so
194 pkt->assertMemInhibit();
195 blk->status &= ~BlkDirty;
196 } else {
197 // if we're responding after our own miss,
198 // there's a window where the recipient didn't
199 // know it was getting ownership and may not
200 // have responded to snoops correctly, so we
201 // can't pass off ownership *or* exclusivity
202 pkt->assertShared();
203 }
204 }
205 } else {
206 // otherwise only respond with a shared copy
207 pkt->assertShared();
208 }
209 }
210 } else {
211 // Not a read or write... must be an upgrade. It's OK
212 // to just ack those as long as we have an exclusive
213 // copy at this level.
214 assert(pkt->isUpgrade());
215 assert(blk != tempBlock);
216 tags->invalidate(blk);
217 blk->invalidate();
218 }
219 }
220
221
222 /////////////////////////////////////////////////////
223 //
224 // MSHR helper functions
225 //
226 /////////////////////////////////////////////////////
227
228
229 template<class TagStore>
230 void
231 Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
232 {
233 markInServiceInternal(mshr, pkt);
234 #if 0
235 if (mshr->originalCmd == MemCmd::HardPFReq) {
236 DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
237 name());
238 //Also clear pending if need be
239 if (!prefetcher->havePending())
240 {
241 deassertMemSideBusRequest(Request_PF);
242 }
243 }
244 #endif
245 }
246
247
248 template<class TagStore>
249 void
250 Cache<TagStore>::squash(int threadNum)
251 {
252 bool unblock = false;
253 BlockedCause cause = NUM_BLOCKED_CAUSES;
254
255 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
256 noTargetMSHR = NULL;
257 unblock = true;
258 cause = Blocked_NoTargets;
259 }
260 if (mshrQueue.isFull()) {
261 unblock = true;
262 cause = Blocked_NoMSHRs;
263 }
264 mshrQueue.squash(threadNum);
265 if (unblock && !mshrQueue.isFull()) {
266 clearBlocked(cause);
267 }
268 }
269
270 /////////////////////////////////////////////////////
271 //
272 // Access path: requests coming in from the CPU side
273 //
274 /////////////////////////////////////////////////////
275
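// Common hit/miss processing for both timing and atomic accesses: flush
// uncacheable accesses, look the block up in the tags, satisfy hits
// (including writebacks that can be absorbed here), and return whether the
// access was satisfied without a downstream request.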
276 template<class TagStore>
277 bool
278 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
279 Cycles &lat, PacketList &writebacks)
280 {
281 if (pkt->req->isUncacheable()) {
282 uncacheableFlush(pkt);
283 blk = NULL;
284 lat = hitLatency;
285 return false;
286 }
287
288 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
289 blk = tags->accessBlock(pkt->getAddr(), lat, id);
290
291 DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
292 pkt->req->isInstFetch() ? " (ifetch)" : "",
293 pkt->getAddr(), (blk) ? "hit" : "miss");
294
295 if (blk != NULL) {
296
297 if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
298 // OK to satisfy access
299 incHitCount(pkt);
300 satisfyCpuSideRequest(pkt, blk);
301 return true;
302 }
303 }
304
305 // Can't satisfy access normally... either no block (blk == NULL)
306 // or have block but need exclusive & only have shared.
307
308 // Writeback handling is special case. We can write the block
309 // into the cache without having a writeable copy (or any copy at
310 // all).
311 if (pkt->cmd == MemCmd::Writeback) {
312 assert(blkSize == pkt->getSize());
313 if (blk == NULL) {
314 // need to do a replacement
315 blk = allocateBlock(pkt->getAddr(), writebacks);
316 if (blk == NULL) {
317 // no replaceable block available, give up.
318 // writeback will be forwarded to next level.
319 incMissCount(pkt);
320 return false;
321 }
322 int id = pkt->req->masterId();
323 tags->insertBlock(pkt->getAddr(), blk, id);
324 blk->status = BlkValid | BlkReadable;
325 }
326 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
327 blk->status |= BlkDirty;
328 if (pkt->isSupplyExclusive()) {
329 blk->status |= BlkWritable;
330 }
331 // nothing else to do; writeback doesn't expect response
332 assert(!pkt->needsResponse());
333 incHitCount(pkt);
334 return true;
335 }
336
337 incMissCount(pkt);
338
339 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
340 // complete miss on store conditional... just give up now
341 pkt->req->setExtraData(0);
342 return true;
343 }
344
345 return false;
346 }
347
348
349 class ForwardResponseRecord : public Packet::SenderState
350 {
351 Packet::SenderState *prevSenderState;
352 PortID prevSrc;
353 #ifndef NDEBUG
354 BaseCache *cache;
355 #endif
356 public:
357 ForwardResponseRecord(Packet *pkt, BaseCache *_cache)
358 : prevSenderState(pkt->senderState), prevSrc(pkt->getSrc())
359 #ifndef NDEBUG
360 , cache(_cache)
361 #endif
362 {}
363 void restore(Packet *pkt, BaseCache *_cache)
364 {
365 assert(_cache == cache);
366 pkt->senderState = prevSenderState;
367 pkt->setDest(prevSrc);
368 }
369 };
370
371
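// Entry point for timing-mode packets from the CPU side (and for express
// snoop responses travelling back down): charge the hit latency, try the
// access, and on a miss allocate an MSHR or write-buffer entry so the
// request can be sent on the memory side.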
372 template<class TagStore>
373 bool
374 Cache<TagStore>::timingAccess(PacketPtr pkt)
375 {
376 //@todo Add back in MemDebug Calls
377 // MemDebug::cacheAccess(pkt);
378
379
380 /// @todo temporary hack to deal with memory corruption issue until
381 /// 4-phase transactions are complete
382 for (int x = 0; x < pendingDelete.size(); x++)
383 delete pendingDelete[x];
384 pendingDelete.clear();
385
386 // we charge hitLatency for doing just about anything here
387 Tick time = clockEdge(hitLatency);
388
389 if (pkt->isResponse()) {
390 // must be cache-to-cache response from upper to lower level
391 ForwardResponseRecord *rec =
392 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
393
394 if (rec == NULL) {
395 assert(pkt->cmd == MemCmd::HardPFResp);
396 // Check if it's a prefetch response and handle it. We shouldn't
397 // get any other kinds of responses without FRRs.
398 DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
399 pkt->getAddr());
400 handleResponse(pkt);
401 return true;
402 }
403
404 rec->restore(pkt, this);
405 delete rec;
406 memSidePort->schedTimingSnoopResp(pkt, time);
407 return true;
408 }
409
410 assert(pkt->isRequest());
411
412 if (pkt->memInhibitAsserted()) {
413 DPRINTF(Cache, "mem inhibited on 0x%x: not responding\n",
414 pkt->getAddr());
415 assert(!pkt->req->isUncacheable());
416 // Special tweak for multilevel coherence: snoop downward here
417 // on invalidates since there may be other caches below here
418 // that have shared copies. Not necessary if we know that
419 // supplier had exclusive copy to begin with.
420 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
421 Packet *snoopPkt = new Packet(pkt, true); // clear flags
422 snoopPkt->setExpressSnoop();
423 snoopPkt->assertMemInhibit();
424 memSidePort->sendTimingReq(snoopPkt);
425 // main memory will delete snoopPkt
426 }
427 // since we're the official target but we aren't responding,
428 // delete the packet now.
429
430 /// @todo nominally we should just delete the packet here,
431 /// however, until 4-phase stuff we can't because sending
432 /// cache is still relying on it
433 pendingDelete.push_back(pkt);
434 return true;
435 }
436
437 if (pkt->req->isUncacheable()) {
438 uncacheableFlush(pkt);
439
440 // writes go in write buffer, reads use MSHR
441 if (pkt->isWrite() && !pkt->isRead()) {
442 allocateWriteBuffer(pkt, time, true);
443 } else {
444 allocateUncachedReadBuffer(pkt, time, true);
445 }
446 assert(pkt->needsResponse()); // else we should delete it here??
447 return true;
448 }
449
450 Cycles lat = hitLatency;
451 BlkType *blk = NULL;
452 PacketList writebacks;
453
454 bool satisfied = access(pkt, blk, lat, writebacks);
455
456 #if 0
457 /** @todo make the fast write alloc (wh64) work with coherence. */
458
459 // If this is a block size write/hint (WH64) allocate the block here
460 // if the coherence protocol allows it.
461 if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
462 (pkt->cmd == MemCmd::WriteReq
463 || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
464 // not outstanding misses, can do this
465 MSHR *outstanding_miss = mshrQueue.findMatch(pkt->getAddr());
466 if (pkt->cmd == MemCmd::WriteInvalidateReq || !outstanding_miss) {
467 if (outstanding_miss) {
468 warn("WriteInv doing a fastallocate "
469 "with an outstanding miss to the same address\n");
470 }
471 blk = handleFill(NULL, pkt, BlkValid | BlkWritable,
472 writebacks);
473 ++fastWrites;
474 }
475 }
476 #endif
477
478 // track time of availability of next prefetch, if any
479 Tick next_pf_time = 0;
480
481 bool needsResponse = pkt->needsResponse();
482
483 if (satisfied) {
484 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
485 if (blk)
486 blk->status &= ~BlkHWPrefetched;
487 next_pf_time = prefetcher->notify(pkt, time);
488 }
489
490 if (needsResponse) {
491 pkt->makeTimingResponse();
492 cpuSidePort->schedTimingResp(pkt, clockEdge(lat));
493 } else {
494 /// @todo nominally we should just delete the packet here,
495 /// however, until 4-phase stuff we can't because sending
496 /// cache is still relying on it
497 pendingDelete.push_back(pkt);
498 }
499 } else {
500 // miss
501
502 Addr blk_addr = blockAlign(pkt->getAddr());
503 MSHR *mshr = mshrQueue.findMatch(blk_addr);
504
505 if (mshr) {
506 /// MSHR hit
507 /// @note writebacks will be checked in getNextMSHR()
508 /// for any conflicting requests to the same block
509
510 //@todo remove hw_pf here
511 assert(pkt->req->masterId() < system->maxMasters());
512 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
513 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
514 mshr->threadNum = -1;
515 }
516 mshr->allocateTarget(pkt, time, order++);
517 if (mshr->getNumTargets() == numTarget) {
518 noTargetMSHR = mshr;
519 setBlocked(Blocked_NoTargets);
520 // need to be careful with this... if this mshr isn't
521 // ready yet (i.e. time > curTick()), we don't want to
522 // move it ahead of mshrs that are ready
523 // mshrQueue.moveToFront(mshr);
524 }
525 } else {
526 // no MSHR
527 assert(pkt->req->masterId() < system->maxMasters());
528 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
529 // always mark as cache fill for now... if we implement
530 // no-write-allocate or bypass accesses this will have to
531 // be changed.
532 if (pkt->cmd == MemCmd::Writeback) {
533 allocateWriteBuffer(pkt, time, true);
534 } else {
535 if (blk && blk->isValid()) {
536 // If we have a write miss to a valid block, we
537 // need to mark the block non-readable. Otherwise
538 // if we allow reads while there's an outstanding
539 // write miss, the read could return stale data
540 // out of the cache block... a more aggressive
541 // system could detect the overlap (if any) and
542 // forward data out of the MSHRs, but we don't do
543 // that yet. Note that we do need to leave the
544 // block valid so that it stays in the cache, in
545 // case we get an upgrade response (and hence no
546 // new data) when the write miss completes.
547 // As long as CPUs do proper store/load forwarding
548 // internally, and have a sufficiently weak memory
549 // model, this is probably unnecessary, but at some
550 // point it must have seemed like we needed it...
551 assert(pkt->needsExclusive() && !blk->isWritable());
552 blk->status &= ~BlkReadable;
553 }
554
555 allocateMissBuffer(pkt, time, true);
556 }
557
558 if (prefetcher) {
559 next_pf_time = prefetcher->notify(pkt, time);
560 }
561 }
562 }
563
564 if (next_pf_time != 0)
565 requestMemSideBus(Request_PF, std::max(time, next_pf_time));
566
567 // copy writebacks to write buffer
568 while (!writebacks.empty()) {
569 PacketPtr wbPkt = writebacks.front();
570 allocateWriteBuffer(wbPkt, time, true);
571 writebacks.pop_front();
572 }
573
574 return true;
575 }
576
577
578 // See comment in cache.hh.
579 template<class TagStore>
580 PacketPtr
581 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
582 bool needsExclusive)
583 {
584 bool blkValid = blk && blk->isValid();
585
586 if (cpu_pkt->req->isUncacheable()) {
587 //assert(blk == NULL);
588 return NULL;
589 }
590
591 if (!blkValid &&
592 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
593 // Writebacks that weren't allocated in access() and upgrades
594 // from upper-level caches that missed completely just go
595 // through.
596 return NULL;
597 }
598
599 assert(cpu_pkt->needsResponse());
600
601 MemCmd cmd;
602 // @TODO make useUpgrades a parameter.
603 // Note that ownership protocols require upgrade, otherwise a
604 // write miss on a shared owned block will generate a ReadExcl,
605 // which will clobber the owned copy.
606 const bool useUpgrades = true;
607 if (blkValid && useUpgrades) {
608 // only reason to be here is that blk is shared
609 // (read-only) and we need exclusive
610 assert(needsExclusive && !blk->isWritable());
611 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
612 } else {
613 // block is invalid
614 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
615 }
616 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
617
618 pkt->allocate();
619 return pkt;
620 }
621
622
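// Atomic-mode access: service the request in place, performing any required
// fill or writeback with sendAtomic() on the memory-side port, and return
// the latency of the access.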
623 template<class TagStore>
624 Tick
625 Cache<TagStore>::atomicAccess(PacketPtr pkt)
626 {
627 Cycles lat = hitLatency;
628
629 // @TODO: make this a parameter
630 bool last_level_cache = false;
631
632 if (pkt->memInhibitAsserted()) {
633 assert(!pkt->req->isUncacheable());
634 // have to invalidate ourselves and any lower caches even if
635 // upper cache will be responding
636 if (pkt->isInvalidate()) {
637 BlkType *blk = tags->findBlock(pkt->getAddr());
638 if (blk && blk->isValid()) {
639 tags->invalidate(blk);
640 blk->invalidate();
641 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: invalidating\n",
642 pkt->cmdString(), pkt->getAddr());
643 }
644 if (!last_level_cache) {
645 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x\n",
646 pkt->cmdString(), pkt->getAddr());
647 lat += ticksToCycles(memSidePort->sendAtomic(pkt));
648 }
649 } else {
650 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
651 pkt->cmdString(), pkt->getAddr());
652 }
653
654 return lat;
655 }
656
657 // should assert here that there are no outstanding MSHRs or
658 // writebacks... that would mean that someone used an atomic
659 // access in timing mode
660
661 BlkType *blk = NULL;
662 PacketList writebacks;
663
664 if (!access(pkt, blk, lat, writebacks)) {
665 // MISS
666 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
667
668 bool is_forward = (bus_pkt == NULL);
669
670 if (is_forward) {
671 // just forwarding the same request to the next level
672 // no local cache operation involved
673 bus_pkt = pkt;
674 }
675
676 DPRINTF(Cache, "Sending an atomic %s for %x\n",
677 bus_pkt->cmdString(), bus_pkt->getAddr());
678
679 #if TRACING_ON
680 CacheBlk::State old_state = blk ? blk->status : 0;
681 #endif
682
683 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));
684
685 DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
686 bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
687
688 // If packet was a forward, the response (if any) is already
689 // in place in the bus_pkt == pkt structure, so we don't need
690 // to do anything. Otherwise, use the separate bus_pkt to
691 // generate response to pkt and then delete it.
692 if (!is_forward) {
693 if (pkt->needsResponse()) {
694 assert(bus_pkt->isResponse());
695 if (bus_pkt->isError()) {
696 pkt->makeAtomicResponse();
697 pkt->copyError(bus_pkt);
698 } else if (bus_pkt->isRead() ||
699 bus_pkt->cmd == MemCmd::UpgradeResp) {
700 // we're updating cache state to allow us to
701 // satisfy the upstream request from the cache
702 blk = handleFill(bus_pkt, blk, writebacks);
703 satisfyCpuSideRequest(pkt, blk);
704 } else {
705 // we're satisfying the upstream request without
706 // modifying cache state, e.g., a write-through
707 pkt->makeAtomicResponse();
708 }
709 }
710 delete bus_pkt;
711 }
712 }
713
714 // Note that we don't invoke the prefetcher at all in atomic mode.
715 // It's not clear how to do it properly, particularly for
716 // prefetchers that aggressively generate prefetch candidates and
717 // rely on bandwidth contention to throttle them; these will tend
718 // to pollute the cache in atomic mode since there is no bandwidth
719 // contention. If we ever do want to enable prefetching in atomic
720 // mode, though, this is the place to do it... see timingAccess()
721 // for an example (though we'd want to issue the prefetch(es)
722 // immediately rather than calling requestMemSideBus() as we do
723 // there).
724
725 // Handle writebacks if needed
726 while (!writebacks.empty()){
727 PacketPtr wbPkt = writebacks.front();
728 memSidePort->sendAtomic(wbPkt);
729 writebacks.pop_front();
730 delete wbPkt;
731 }
732
733 // We now have the block one way or another (hit or completed miss)
734
735 if (pkt->needsResponse()) {
736 pkt->makeAtomicResponse();
737 }
738
739 return lat;
740 }
741
742
743 template<class TagStore>
744 void
745 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
746 {
747 Addr blk_addr = blockAlign(pkt->getAddr());
748 BlkType *blk = tags->findBlock(pkt->getAddr());
749 MSHR *mshr = mshrQueue.findMatch(blk_addr);
750
751 pkt->pushLabel(name());
752
753 CacheBlkPrintWrapper cbpw(blk);
754
755 // Note that just because an L2/L3 has valid data doesn't mean an
756 // L1 doesn't have a more up-to-date modified copy that still
757 // needs to be found. As a result we always update the request if
758 // we have it, but only declare it satisfied if we are the owner.
759
760 // see if we have data at all (owned or otherwise)
761 bool have_data = blk && blk->isValid()
762 && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
763
764 // data we have is dirty if marked as such or if valid & ownership
765 // pending due to outstanding UpgradeReq
766 bool have_dirty =
767 have_data && (blk->isDirty() ||
768 (mshr && mshr->inService && mshr->isPendingDirty()));
769
770 bool done = have_dirty
771 || cpuSidePort->checkFunctional(pkt)
772 || mshrQueue.checkFunctional(pkt, blk_addr)
773 || writeBuffer.checkFunctional(pkt, blk_addr)
774 || memSidePort->checkFunctional(pkt);
775
776 DPRINTF(Cache, "functional %s %x %s%s%s\n",
777 pkt->cmdString(), pkt->getAddr(),
778 (blk && blk->isValid()) ? "valid " : "",
779 have_data ? "data " : "", done ? "done " : "");
780
781 // We're leaving the cache, so pop cache->name() label
782 pkt->popLabel();
783
784 if (done) {
785 pkt->makeResponse();
786 } else {
787 // if it came as a request from the CPU side then make sure it
788 // continues towards the memory side
789 if (fromCpuSide) {
790 memSidePort->sendFunctional(pkt);
791 } else if (forwardSnoops && cpuSidePort->isSnooping()) {
792 // if it came from the memory side, it must be a snoop request
793 // and we should only forward it if we are forwarding snoops
794 cpuSidePort->sendFunctionalSnoop(pkt);
795 }
796 }
797 }
798
799
800 /////////////////////////////////////////////////////
801 //
802 // Response handling: responses from the memory side
803 //
804 /////////////////////////////////////////////////////
805
806
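// Handle a response arriving on the memory-side port: update miss latency
// statistics, fill the block if the response carries data, service every
// target queued in the MSHR (CPU requests, prefetches, deferred snoops),
// and then retire the MSHR or re-arm it for deferred targets.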
807 template<class TagStore>
808 void
809 Cache<TagStore>::handleResponse(PacketPtr pkt)
810 {
811 Tick time = clockEdge(hitLatency);
812 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
813 bool is_error = pkt->isError();
814
815 assert(mshr);
816
817 if (is_error) {
818 DPRINTF(Cache, "Cache received packet with error for address %x, "
819 "cmd: %s\n", pkt->getAddr(), pkt->cmdString());
820 }
821
822 DPRINTF(Cache, "Handling response to %x\n", pkt->getAddr());
823
824 MSHRQueue *mq = mshr->queue;
825 bool wasFull = mq->isFull();
826
827 if (mshr == noTargetMSHR) {
828 // we always clear at least one target
829 clearBlocked(Blocked_NoTargets);
830 noTargetMSHR = NULL;
831 }
832
833 // Initial target is used just for stats
834 MSHR::Target *initial_tgt = mshr->getTarget();
835 BlkType *blk = tags->findBlock(pkt->getAddr());
836 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
837 Tick miss_latency = curTick() - initial_tgt->recvTime;
838 PacketList writebacks;
839
840 if (pkt->req->isUncacheable()) {
841 assert(pkt->req->masterId() < system->maxMasters());
842 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
843 miss_latency;
844 } else {
845 assert(pkt->req->masterId() < system->maxMasters());
846 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
847 miss_latency;
848 }
849
850 bool is_fill = !mshr->isForward &&
851 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
852
853 if (is_fill && !is_error) {
854 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
855 pkt->getAddr());
856
857 // give mshr a chance to do some dirty work
858 mshr->handleFill(pkt, blk);
859
860 blk = handleFill(pkt, blk, writebacks);
861 assert(blk != NULL);
862 }
863
864 // First offset for critical word first calculations
865 int initial_offset = 0;
866
867 if (mshr->hasTargets()) {
868 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
869 }
870
871 while (mshr->hasTargets()) {
872 MSHR::Target *target = mshr->getTarget();
873
874 switch (target->source) {
875 case MSHR::Target::FromCPU:
876 Tick completion_time;
877 if (is_fill) {
878 satisfyCpuSideRequest(target->pkt, blk,
879 true, mshr->hasPostDowngrade());
880 // How many bytes past the first request is this one
881 int transfer_offset =
882 target->pkt->getOffset(blkSize) - initial_offset;
883 if (transfer_offset < 0) {
884 transfer_offset += blkSize;
885 }
886
887 // If critical word (no offset) return first word time.
888 // responseLatency is the latency of the return path
889 // from lower level caches/memory to an upper level cache or
890 // the core.
891 completion_time = responseLatency * clock +
892 (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
893
894 assert(!target->pkt->req->isUncacheable());
895
896 assert(target->pkt->req->masterId() < system->maxMasters());
897 missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] +=
898 completion_time - target->recvTime;
899 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
900 // failed StoreCond upgrade
901 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
902 target->pkt->cmd == MemCmd::StoreCondFailReq ||
903 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
904 // responseLatency is the latency of the return path
905 // from lower level caches/memory to an upper level cache or
906 // the core.
907 completion_time = responseLatency * clock + pkt->finishTime;
908 target->pkt->req->setExtraData(0);
909 } else {
910 // not a cache fill, just forwarding response
911 // responseLatency is the latency of the return path
912 // from lower level caches/memory to the core.
913 completion_time = responseLatency * clock + pkt->finishTime;
914 if (pkt->isRead() && !is_error) {
915 target->pkt->setData(pkt->getPtr<uint8_t>());
916 }
917 }
918 target->pkt->makeTimingResponse();
919 // if this packet is an error copy that to the new packet
920 if (is_error)
921 target->pkt->copyError(pkt);
922 if (target->pkt->cmd == MemCmd::ReadResp &&
923 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
924 // If intermediate cache got ReadRespWithInvalidate,
925 // propagate that. Response should not have
926 // isInvalidate() set otherwise.
927 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
928 }
929 cpuSidePort->schedTimingResp(target->pkt, completion_time);
930 break;
931
932 case MSHR::Target::FromPrefetcher:
933 assert(target->pkt->cmd == MemCmd::HardPFReq);
934 if (blk)
935 blk->status |= BlkHWPrefetched;
936 delete target->pkt->req;
937 delete target->pkt;
938 break;
939
940 case MSHR::Target::FromSnoop:
941 // I don't believe that a snoop can be in an error state
942 assert(!is_error);
943 // response to snoop request
944 DPRINTF(Cache, "processing deferred snoop...\n");
945 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
946 handleSnoop(target->pkt, blk, true, true,
947 mshr->hasPostInvalidate());
948 break;
949
950 default:
951 panic("Illegal target->source enum %d\n", target->source);
952 }
953
954 mshr->popTarget();
955 }
956
957 if (blk && blk->isValid()) {
958 if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
959 assert(blk != tempBlock);
960 tags->invalidate(blk);
961 blk->invalidate();
962 } else if (mshr->hasPostDowngrade()) {
963 blk->status &= ~BlkWritable;
964 }
965 }
966
967 if (mshr->promoteDeferredTargets()) {
968 // avoid later read getting stale data while write miss is
969 // outstanding.. see comment in timingAccess()
970 if (blk) {
971 blk->status &= ~BlkReadable;
972 }
973 MSHRQueue *mq = mshr->queue;
974 mq->markPending(mshr);
975 requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
976 } else {
977 mq->deallocate(mshr);
978 if (wasFull && !mq->isFull()) {
979 clearBlocked((BlockedCause)mq->index);
980 }
981 }
982
983 // copy writebacks to write buffer
984 while (!writebacks.empty()) {
985 PacketPtr wbPkt = writebacks.front();
986 allocateWriteBuffer(wbPkt, time, true);
987 writebacks.pop_front();
988 }
989 // if we used temp block, clear it out
990 if (blk == tempBlock) {
991 if (blk->isDirty()) {
992 allocateWriteBuffer(writebackBlk(blk), time, true);
993 }
994 blk->invalidate();
995 }
996
997 delete pkt;
998 }
999
1000
1001
1002
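// Build a writeback packet for a dirty block: copy the block's data into a
// new Writeback request, mark it as supplying an exclusive copy if the block
// is writable, and clear the block's dirty bit.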
1003 template<class TagStore>
1004 PacketPtr
1005 Cache<TagStore>::writebackBlk(BlkType *blk)
1006 {
1007 assert(blk && blk->isValid() && blk->isDirty());
1008
1009 writebacks[Request::wbMasterId]++;
1010
1011 Request *writebackReq =
1012 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1013 Request::wbMasterId);
1014 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback);
1015 if (blk->isWritable()) {
1016 writeback->setSupplyExclusive();
1017 }
1018 writeback->allocate();
1019 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1020
1021 blk->status &= ~BlkDirty;
1022 return writeback;
1023 }
1024
1025 template<class TagStore>
1026 void
1027 Cache<TagStore>::memWriteback()
1028 {
1029 WrappedBlkVisitor visitor(*this, &Cache<TagStore>::writebackVisitor);
1030 tags->forEachBlk(visitor);
1031 }
1032
1033 template<class TagStore>
1034 void
1035 Cache<TagStore>::memInvalidate()
1036 {
1037 WrappedBlkVisitor visitor(*this, &Cache<TagStore>::invalidateVisitor);
1038 tags->forEachBlk(visitor);
1039 }
1040
1041 template<class TagStore>
1042 bool
1043 Cache<TagStore>::isDirty() const
1044 {
1045 CacheBlkIsDirtyVisitor<BlkType> visitor;
1046 tags->forEachBlk(visitor);
1047
1048 return visitor.isDirty();
1049 }
1050
1051 template<class TagStore>
1052 bool
1053 Cache<TagStore>::writebackVisitor(BlkType &blk)
1054 {
1055 if (blk.isDirty()) {
1056 assert(blk.isValid());
1057
1058 Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
1059 blkSize, 0, Request::funcMasterId);
1060
1061 Packet packet(&request, MemCmd::WriteReq);
1062 packet.dataStatic(blk.data);
1063
1064 memSidePort->sendFunctional(&packet);
1065
1066 blk.status &= ~BlkDirty;
1067 }
1068
1069 return true;
1070 }
1071
1072 template<class TagStore>
1073 bool
1074 Cache<TagStore>::invalidateVisitor(BlkType &blk)
1075 {
1076
1077 if (blk.isDirty())
1078 warn_once("Invalidating dirty cache lines. Expect things to break.\n");
1079
1080 if (blk.isValid()) {
1081 assert(!blk.isDirty());
1082 tags->invalidate(dynamic_cast< BlkType *>(&blk));
1083 blk.invalidate();
1084 }
1085
1086 return true;
1087 }
1088
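// An uncacheable access bypasses the cache, so write back and invalidate
// any copy of the line we hold to keep it consistent with memory.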
1089 template<class TagStore>
1090 void
1091 Cache<TagStore>::uncacheableFlush(PacketPtr pkt)
1092 {
1093 DPRINTF(Cache, "%s%s %x uncacheable\n", pkt->cmdString(),
1094 pkt->req->isInstFetch() ? " (ifetch)" : "",
1095 pkt->getAddr());
1096
1097 if (pkt->req->isClearLL())
1098 tags->clearLocks();
1099
1100 BlkType *blk(tags->findBlock(pkt->getAddr()));
1101 if (blk) {
1102 writebackVisitor(*blk);
1103 invalidateVisitor(*blk);
1104 }
1105 }
1106
1107
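// Pick a victim frame for the given address. A dirty victim generates a
// writeback packet that is appended to the writebacks list; if the victim
// has an outstanding upgrade MSHR it cannot be replaced and NULL is
// returned.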
1108 template<class TagStore>
1109 typename Cache<TagStore>::BlkType*
1110 Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
1111 {
1112 BlkType *blk = tags->findVictim(addr, writebacks);
1113
1114 if (blk->isValid()) {
1115 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1116 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
1117 if (repl_mshr) {
1118 // must be an outstanding upgrade request on block
1119 // we're about to replace...
1120 assert(!blk->isWritable());
1121 assert(repl_mshr->needsExclusive());
1122 // too hard to replace block with transient state
1123 // allocation failed, block not inserted
1124 return NULL;
1125 } else {
1126 DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
1127 repl_addr, addr,
1128 blk->isDirty() ? "writeback" : "clean");
1129
1130 if (blk->isDirty()) {
1131 // Save writeback packet for handling by caller
1132 writebacks.push_back(writebackBlk(blk));
1133 }
1134 }
1135 }
1136
1137 return blk;
1138 }
1139
1140
1141 // Note that the reason we return a list of writebacks rather than
1142 // inserting them directly in the write buffer is that this function
1143 // is called by both atomic and timing-mode accesses, and in atomic
1144 // mode we don't mess with the write buffer (we just perform the
1145 // writebacks atomically once the original request is complete).
1146 template<class TagStore>
1147 typename Cache<TagStore>::BlkType*
1148 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1149 PacketList &writebacks)
1150 {
1151 Addr addr = pkt->getAddr();
1152 #if TRACING_ON
1153 CacheBlk::State old_state = blk ? blk->status : 0;
1154 #endif
1155
1156 if (blk == NULL) {
1157 // better have read new data...
1158 assert(pkt->hasData());
1159 // need to do a replacement
1160 blk = allocateBlock(addr, writebacks);
1161 if (blk == NULL) {
1162 // No replaceable block... just use temporary storage to
1163 // complete the current request and then get rid of it
1164 assert(!tempBlock->isValid());
1165 blk = tempBlock;
1166 tempBlock->set = tags->extractSet(addr);
1167 tempBlock->tag = tags->extractTag(addr);
1168 DPRINTF(Cache, "using temp block for %x\n", addr);
1169 } else {
1170 int id = pkt->req->masterId();
1171 tags->insertBlock(pkt->getAddr(), blk, id);
1172 }
1173
1174 // we should never be overwriting a valid block
1175 assert(!blk->isValid());
1176 } else {
1177 // existing block... probably an upgrade
1178 assert(blk->tag == tags->extractTag(addr));
1179 // either we're getting new data or the block should already be valid
1180 assert(pkt->hasData() || blk->isValid());
1181 // don't clear block status... if block is already dirty we
1182 // don't want to lose that
1183 }
1184
1185 blk->status |= BlkValid | BlkReadable;
1186
1187 if (!pkt->sharedAsserted()) {
1188 blk->status |= BlkWritable;
1189 // If we got this via cache-to-cache transfer (i.e., from a
1190 // cache that was an owner) and took away that owner's copy,
1191 // then we need to write it back. Normally this happens
1192 // anyway as a side effect of getting a copy to write it, but
1193 // there are cases (such as failed store conditionals or
1194 // compare-and-swaps) where we'll demand an exclusive copy but
1195 // end up not writing it.
1196 if (pkt->memInhibitAsserted())
1197 blk->status |= BlkDirty;
1198 }
1199
1200 DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
1201 addr, old_state, blk->status);
1202
1203 // if we got new data, copy it in
1204 if (pkt->isRead()) {
1205 std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
1206 }
1207
1208 blk->whenReady = pkt->finishTime;
1209
1210 return blk;
1211 }
1212
1213
1214 /////////////////////////////////////////////////////
1215 //
1216 // Snoop path: requests coming in from the memory side
1217 //
1218 /////////////////////////////////////////////////////
1219
1220 template<class TagStore>
1221 void
1222 Cache<TagStore>::
1223 doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
1224 bool already_copied, bool pending_inval)
1225 {
1226 // timing-mode snoop responses require a new packet, unless we
1227 // already made a copy...
1228 PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
1229 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1230 pkt->allocate();
1231 pkt->makeTimingResponse();
1232 if (pkt->isRead()) {
1233 pkt->setDataFromBlock(blk_data, blkSize);
1234 }
1235 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1236 // Assume we defer a response to a read from a far-away cache
1237 // A, then later defer a ReadExcl from a cache B on the same
1238 // bus as us. We'll assert MemInhibit in both cases, but in
1239 // the latter case MemInhibit will keep the invalidation from
1240 // reaching cache A. This special response tells cache A that
1241 // it gets the block to satisfy its read, but must immediately
1242 // invalidate it.
1243 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1244 }
1245 memSidePort->schedTimingSnoopResp(pkt, clockEdge(hitLatency));
1246 }
1247
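// Core snoop handling shared by the timing and atomic paths: forward the
// snoop upward if needed, decide whether we must respond with (possibly
// dirty) data, downgrade or invalidate our copy accordingly, and send the
// response.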
1248 template<class TagStore>
1249 void
1250 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1251 bool is_timing, bool is_deferred,
1252 bool pending_inval)
1253 {
1254 // deferred snoops can only happen in timing mode
1255 assert(!(is_deferred && !is_timing));
1256 // pending_inval only makes sense on deferred snoops
1257 assert(!(pending_inval && !is_deferred));
1258 assert(pkt->isRequest());
1259
1260 // the packet may get modified if we or a forwarded snooper
1261 // responds in atomic mode, so remember a few things about the
1262 // original packet up front
1263 bool invalidate = pkt->isInvalidate();
1264 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1265
1266 if (forwardSnoops) {
1267 // first propagate snoop upward to see if anyone above us wants to
1268 // handle it. save & restore packet src since it will get
1269 // rewritten to be relative to cpu-side bus (if any)
1270 bool alreadyResponded = pkt->memInhibitAsserted();
1271 if (is_timing) {
1272 Packet snoopPkt(pkt, true); // clear flags
1273 snoopPkt.setExpressSnoop();
1274 snoopPkt.senderState = new ForwardResponseRecord(pkt, this);
1275 cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1276 if (snoopPkt.memInhibitAsserted()) {
1277 // cache-to-cache response from some upper cache
1278 assert(!alreadyResponded);
1279 pkt->assertMemInhibit();
1280 } else {
1281 delete snoopPkt.senderState;
1282 }
1283 if (snoopPkt.sharedAsserted()) {
1284 pkt->assertShared();
1285 }
1286 } else {
1287 cpuSidePort->sendAtomicSnoop(pkt);
1288 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1289 // cache-to-cache response from some upper cache:
1290 // forward response to original requester
1291 assert(pkt->isResponse());
1292 }
1293 }
1294 }
1295
1296 if (!blk || !blk->isValid()) {
1297 return;
1298 }
1299
1300 // we may end up modifying both the block state and the packet (if
1301 // we respond in atomic mode), so just figure out what to do now
1302 // and then do it later
1303 bool respond = blk->isDirty() && pkt->needsResponse();
1304 bool have_exclusive = blk->isWritable();
1305
1306 if (pkt->isRead() && !invalidate) {
1307 assert(!needs_exclusive);
1308 pkt->assertShared();
1309 int bits_to_clear = BlkWritable;
1310 const bool haveOwnershipState = true; // for now
1311 if (!haveOwnershipState) {
1312 // if we don't support pure ownership (dirty && !writable),
1313 // have to clear dirty bit here, assume memory snarfs data
1314 // on cache-to-cache xfer
1315 bits_to_clear |= BlkDirty;
1316 }
1317 blk->status &= ~bits_to_clear;
1318 }
1319
1320 DPRINTF(Cache, "snooped a %s request for addr %x, %snew state is %i\n",
1321 pkt->cmdString(), blockAlign(pkt->getAddr()),
1322 respond ? "responding, " : "", invalidate ? 0 : blk->status);
1323
1324 if (respond) {
1325 assert(!pkt->memInhibitAsserted());
1326 pkt->assertMemInhibit();
1327 if (have_exclusive) {
1328 pkt->setSupplyExclusive();
1329 }
1330 if (is_timing) {
1331 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1332 } else {
1333 pkt->makeAtomicResponse();
1334 pkt->setDataFromBlock(blk->data, blkSize);
1335 }
1336 } else if (is_timing && is_deferred) {
1337 // if it's a deferred timing snoop then we've made a copy of
1338 // the packet, and so if we're not using that copy to respond
1339 // then we need to delete it here.
1340 delete pkt;
1341 }
1342
1343 // Do this last in case it deallocates block data or something
1344 // like that
1345 if (invalidate) {
1346 assert(blk != tempBlock);
1347 tags->invalidate(blk);
1348 blk->invalidate();
1349 }
1350 }
1351
1352
1353 template<class TagStore>
1354 void
1355 Cache<TagStore>::snoopTiming(PacketPtr pkt)
1356 {
1357 // Note that some deferred snoops don't have requests, since the
1358 // original access may have already completed
1359 if ((pkt->req && pkt->req->isUncacheable()) ||
1360 pkt->cmd == MemCmd::Writeback) {
1361 //Can't get a hit on an uncacheable address
1362 //Revisit this for multi level coherence
1363 return;
1364 }
1365
1366 BlkType *blk = tags->findBlock(pkt->getAddr());
1367
1368 Addr blk_addr = blockAlign(pkt->getAddr());
1369 MSHR *mshr = mshrQueue.findMatch(blk_addr);
1370
1371 // Let the MSHR itself track the snoop and decide whether we want
1372 // to go ahead and do the regular cache snoop
1373 if (mshr && mshr->handleSnoop(pkt, order++)) {
1374 DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x\n",
1375 blk_addr);
1376 if (mshr->getNumTargets() > numTarget)
1377 warn("allocating bonus target for snoop"); //handle later
1378 return;
1379 }
1380
1381 //We also need to check the writeback buffers and handle those
1382 std::vector<MSHR *> writebacks;
1383 if (writeBuffer.findMatches(blk_addr, writebacks)) {
1384 DPRINTF(Cache, "Snoop hit in writeback to addr: %x\n",
1385 pkt->getAddr());
1386
1387 // Look through writebacks for any non-uncacheable writes, use that
1388 if (writebacks.size()) {
1389 // We should only ever find a single match
1390 assert(writebacks.size() == 1);
1391 mshr = writebacks[0];
1392 assert(!mshr->isUncacheable());
1393 assert(mshr->getNumTargets() == 1);
1394 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1395 assert(wb_pkt->cmd == MemCmd::Writeback);
1396
1397 assert(!pkt->memInhibitAsserted());
1398 pkt->assertMemInhibit();
1399 if (!pkt->needsExclusive()) {
1400 pkt->assertShared();
1401 // the writeback is no longer the exclusive copy in the system
1402 wb_pkt->clearSupplyExclusive();
1403 } else {
1404 // if we're not asserting the shared line, we need to
1405 // invalidate our copy. we'll do that below as long as
1406 // the packet's invalidate flag is set...
1407 assert(pkt->isInvalidate());
1408 }
1409 doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
1410 false, false);
1411
1412 if (pkt->isInvalidate()) {
1413 // Invalidation trumps our writeback... discard here
1414 markInService(mshr);
1415 delete wb_pkt;
1416 }
1417 } // writebacks.size()
1418 }
1419
1420 // If this was a shared writeback, there may still be
1421 // other shared copies above that require invalidation.
1422 // We could be more selective and return here if the
1423 // request is non-exclusive or if the writeback is
1424 // exclusive.
1425 handleSnoop(pkt, blk, true, false, false);
1426 }
1427
1428 template<class TagStore>
1429 bool
1430 Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
1431 {
1432 // Express snoop responses from master to slave, e.g., from L1 to L2
1433 cache->timingAccess(pkt);
1434 return true;
1435 }
1436
1437 template<class TagStore>
1438 Cycles
1439 Cache<TagStore>::snoopAtomic(PacketPtr pkt)
1440 {
1441 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1442 // Can't get a hit on an uncacheable address
1443 // Revisit this for multi level coherence
1444 return hitLatency;
1445 }
1446
1447 BlkType *blk = tags->findBlock(pkt->getAddr());
1448 handleSnoop(pkt, blk, false, false, false);
1449 return hitLatency;
1450 }
1451
1452
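// Choose the next MSHR or write-buffer entry to service, normally favouring
// misses over writebacks unless the write buffer is full, and falling back
// to a prefetcher-generated request when both queues are idle.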
1453 template<class TagStore>
1454 MSHR *
1455 Cache<TagStore>::getNextMSHR()
1456 {
1457 // Check both MSHR queue and write buffer for potential requests
1458 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1459 MSHR *write_mshr = writeBuffer.getNextMSHR();
1460
1461 // Now figure out which one to send... some cases are easy
1462 if (miss_mshr && !write_mshr) {
1463 return miss_mshr;
1464 }
1465 if (write_mshr && !miss_mshr) {
1466 return write_mshr;
1467 }
1468
1469 if (miss_mshr && write_mshr) {
1470 // We have one of each... normally we favor the miss request
1471 // unless the write buffer is full
1472 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1473 // Write buffer is full, so we'd like to issue a write;
1474 // need to search MSHR queue for conflicting earlier miss.
1475 MSHR *conflict_mshr =
1476 mshrQueue.findPending(write_mshr->addr, write_mshr->size);
1477
1478 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1479 // Service misses in order until conflict is cleared.
1480 return conflict_mshr;
1481 }
1482
1483 // No conflicts; issue write
1484 return write_mshr;
1485 }
1486
1487 // Write buffer isn't full, but need to check it for
1488 // conflicting earlier writeback
1489 MSHR *conflict_mshr =
1490 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size);
1491 if (conflict_mshr) {
1492 // not sure why we don't check order here... it was in the
1493 // original code but commented out.
1494
1495 // The only way this happens is if we are doing a write and
1496 // didn't have permissions, then subsequently saw a writeback
1497 // (the owned copy got evicted). We need to perform the
1498 // writeback first to preserve the dirty data; then we can
1499 // issue the write.
1500
1501 // should we return write_mshr here instead? I.e. do we
1502 // have to flush writes in order? I don't think so... not
1503 // for Alpha anyway. Maybe for x86?
1504 return conflict_mshr;
1505 }
1506
1507 // No conflicts; issue read
1508 return miss_mshr;
1509 }
1510
1511 // fall through... no pending requests. Try a prefetch.
1512 assert(!miss_mshr && !write_mshr);
1513 if (prefetcher && !mshrQueue.isFull()) {
1514 // If we have a miss queue slot, we can try a prefetch
1515 PacketPtr pkt = prefetcher->getPacket();
1516 if (pkt) {
1517 Addr pf_addr = blockAlign(pkt->getAddr());
1518 if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
1519 !writeBuffer.findMatch(pf_addr)) {
1520 // Update statistic on number of prefetches issued
1521 // (hwpf_mshr_misses)
1522 assert(pkt->req->masterId() < system->maxMasters());
1523 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1524 // Don't request bus, since we already have it
1525 return allocateMissBuffer(pkt, curTick(), false);
1526 } else {
1527 // free the request and packet
1528 delete pkt->req;
1529 delete pkt;
1530 }
1531 }
1532 }
1533
1534 return NULL;
1535 }
1536
1537
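// Build the packet to send on the memory-side port for the next ready MSHR:
// failed store-conditional upgrades are completed locally, forwards are sent
// as-is, and hardware prefetches first snoop upward to make sure no cache
// above holds a dirty copy of the line before the bus packet is constructed.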
1538 template<class TagStore>
1539 PacketPtr
1540 Cache<TagStore>::getTimingPacket()
1541 {
1542 MSHR *mshr = getNextMSHR();
1543
1544 if (mshr == NULL) {
1545 return NULL;
1546 }
1547
1548 // use request from 1st target
1549 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1550 PacketPtr pkt = NULL;
1551
1552 if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
1553 tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
1554 // SCUpgradeReq or StoreCondReq saw invalidation while queued
1555 // in MSHR, so now that we are getting around to processing
1556 // it, just treat it as if we got a failure response
1557 pkt = new Packet(tgt_pkt);
1558 pkt->cmd = MemCmd::UpgradeFailResp;
1559 pkt->senderState = mshr;
1560 pkt->firstWordTime = pkt->finishTime = curTick();
1561 handleResponse(pkt);
1562 return NULL;
1563 } else if (mshr->isForwardNoResponse()) {
1564 // no response expected, just forward packet as it is
1565 assert(tags->findBlock(mshr->addr) == NULL);
1566 pkt = tgt_pkt;
1567 } else {
1568 BlkType *blk = tags->findBlock(mshr->addr);
1569
1570 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1571 // It might be possible for a writeback to arrive between
1572 // the time the prefetch is placed in the MSHRs and when
1573 // it's selected to send... if so, this assert will catch
1574 // that, and then we'll have to figure out what to do.
1575 assert(blk == NULL);
1576
1577 // We need to check the caches above us to verify that they don't have
1578 // a copy of this block in the dirty state at the moment. Without this
1579 // check we could get a stale copy from memory that might get used
1580 // in place of the dirty one.
1581 PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
1582 snoop_pkt->setExpressSnoop();
1583 snoop_pkt->senderState = mshr;
1584 cpuSidePort->sendTimingSnoopReq(snoop_pkt);
1585
1586 if (snoop_pkt->memInhibitAsserted()) {
1587 markInService(mshr, snoop_pkt);
1588 DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
1589 tgt_pkt->getAddr());
1590 delete snoop_pkt;
1591 return NULL;
1592 }
1593 delete snoop_pkt;
1594 }
1595
1596 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
1597
1598 mshr->isForward = (pkt == NULL);
1599
1600 if (mshr->isForward) {
1601 // not a cache block request, but a response is expected
1602 // make copy of current packet to forward, keep current
1603 // copy for response handling
1604 pkt = new Packet(tgt_pkt);
1605 pkt->allocate();
1606 if (pkt->isWrite()) {
1607 pkt->setData(tgt_pkt->getPtr<uint8_t>());
1608 }
1609 }
1610 }
1611
1612 assert(pkt != NULL);
1613 pkt->senderState = mshr;
1614 return pkt;
1615 }
1616
1617
1618 template<class TagStore>
1619 Tick
1620 Cache<TagStore>::nextMSHRReadyTime()
1621 {
1622 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
1623 writeBuffer.nextMSHRReadyTime());
1624
1625 if (prefetcher) {
1626 nextReady = std::min(nextReady,
1627 prefetcher->nextPrefetchReadyTime());
1628 }
1629
1630 return nextReady;
1631 }
1632
1633 template<class TagStore>
1634 void
1635 Cache<TagStore>::serialize(std::ostream &os)
1636 {
1637 bool dirty(isDirty());
1638
1639 if (dirty) {
1640 warn("*** The cache still contains dirty data. ***\n");
1641 warn(" Make sure to drain the system using the correct flags.\n");
1642 warn(" This checkpoint will not restore correctly and dirty data in "
1643 "the cache will be lost!\n");
1644 }
1645
1646 // Since we don't checkpoint the data in the cache, any dirty data
1647 // will be lost when restoring from a checkpoint of a system that
1648 // wasn't drained properly. Flag the checkpoint as invalid if the
1649 // cache contains dirty data.
1650 bool bad_checkpoint(dirty);
1651 SERIALIZE_SCALAR(bad_checkpoint);
1652 }
1653
1654 template<class TagStore>
1655 void
1656 Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
1657 {
1658 bool bad_checkpoint;
1659 UNSERIALIZE_SCALAR(bad_checkpoint);
1660 if (bad_checkpoint) {
1661 fatal("Restoring from checkpoints with dirty caches is not supported "
1662 "in the classic memory system. Please remove any caches or "
1663 "drain them properly before taking checkpoints.\n");
1664 }
1665 }
1666
1667 ///////////////
1668 //
1669 // CpuSidePort
1670 //
1671 ///////////////
1672
1673 template<class TagStore>
1674 AddrRangeList
1675 Cache<TagStore>::CpuSidePort::getAddrRanges() const
1676 {
1677 return cache->getAddrRanges();
1678 }
1679
1680 template<class TagStore>
1681 bool
1682 Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
1683 {
1684 // always let inhibited requests through even if blocked
1685 if (!pkt->memInhibitAsserted() && blocked) {
1686 DPRINTF(Cache,"Scheduling a retry while blocked\n");
1687 mustSendRetry = true;
1688 return false;
1689 }
1690
1691 cache->timingAccess(pkt);
1692 return true;
1693 }
1694
1695 template<class TagStore>
1696 Tick
1697 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
1698 {
1699 // atomic request
1700 return cache->atomicAccess(pkt);
1701 }
1702
1703 template<class TagStore>
1704 void
1705 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
1706 {
1707 // functional request
1708 cache->functionalAccess(pkt, true);
1709 }
1710
1711 template<class TagStore>
1712 Cache<TagStore>::
1713 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
1714 const std::string &_label)
1715 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
1716 {
1717 }
1718
1719 ///////////////
1720 //
1721 // MemSidePort
1722 //
1723 ///////////////
1724
1725 template<class TagStore>
1726 bool
1727 Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
1728 {
1729 cache->handleResponse(pkt);
1730 return true;
1731 }
1732
1733 // Express snooping requests to memside port
1734 template<class TagStore>
1735 void
1736 Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
1737 {
1738 // handle snooping requests
1739 cache->snoopTiming(pkt);
1740 }
1741
1742 template<class TagStore>
1743 Tick
1744 Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
1745 {
1746 // atomic snoop
1747 return cache->snoopAtomic(pkt);
1748 }
1749
1750 template<class TagStore>
1751 void
1752 Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
1753 {
1754 // functional snoop (note that in contrast to atomic we don't have
1755 // a specific functionalSnoop method, as they have the same
1756 // behaviour regardless)
1757 cache->functionalAccess(pkt, false);
1758 }
1759
1760 template<class TagStore>
1761 void
1762 Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
1763 {
1764 // if we have a response packet waiting we have to start with that
1765 if (deferredPacketReady()) {
1766 // use the normal approach from the timing port
1767 trySendTiming();
1768 } else {
1769 // check for request packets (requests & writebacks)
1770 PacketPtr pkt = cache.getTimingPacket();
1771 if (pkt == NULL) {
1772 // can happen if e.g. we attempt a writeback and fail, but
1773 // before the retry, the writeback is eliminated because
1774 // we snoop another cache's ReadEx.
1775 waitingOnRetry = false;
1776 } else {
1777 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1778
1779 waitingOnRetry = !masterPort.sendTimingReq(pkt);
1780
1781 if (waitingOnRetry) {
1782 DPRINTF(CachePort, "now waiting on a retry\n");
1783 if (!mshr->isForwardNoResponse()) {
1784 // we are awaiting a retry; delete the packet now and
1785 // create a new one from the MSHR when we get the
1786 // opportunity to retry
1787 delete pkt;
1788 }
1789 // note that we have now masked any requestBus and
1790 // schedSendEvent (we will wait for a retry before
1791 // doing anything), and this is so even if we do not
1792 // care about this packet and might override it before
1793 // it gets retried
1794 } else {
1795 cache.markInService(mshr, pkt);
1796 }
1797 }
1798 }
1799
1800 // if we succeeded and are not waiting for a retry, schedule the
1801 // next send, not only looking at the response transmit list, but
1802 // also considering when the next MSHR is ready
1803 if (!waitingOnRetry) {
1804 scheduleSend(cache.nextMSHRReadyTime());
1805 }
1806 }
1807
1808 template<class TagStore>
1809 Cache<TagStore>::
1810 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
1811 const std::string &_label)
1812 : BaseCache::CacheMasterPort(_name, _cache, _queue),
1813 _queue(*_cache, *this, _label), cache(_cache)
1814 {
1815 }