1 /*
2 * Copyright (c) 2010-2015 ARM Limited
3 * All rights reserved.
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2010 Advanced Micro Devices, Inc.
16 * All rights reserved.
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 *
41 * Authors: Erik Hallnor
42 * Dave Greene
43 * Nathan Binkert
44 * Steve Reinhardt
45 * Ron Dreslinski
46 * Andreas Sandberg
47 */
48
49 #ifndef __MEM_CACHE_CACHE_IMPL_HH__
50 #define __MEM_CACHE_CACHE_IMPL_HH__
51
52 /**
53 * @file
54 * Cache definitions.
55 */
56
57 #include "base/misc.hh"
58 #include "base/types.hh"
59 #include "debug/Cache.hh"
60 #include "debug/CachePort.hh"
61 #include "debug/CacheTags.hh"
62 #include "mem/cache/prefetch/base.hh"
63 #include "mem/cache/blk.hh"
64 #include "mem/cache/cache.hh"
65 #include "mem/cache/mshr.hh"
66 #include "sim/sim_exit.hh"
67
68 template<class TagStore>
69 Cache<TagStore>::Cache(const Params *p)
70 : BaseCache(p),
71 tags(dynamic_cast<TagStore*>(p->tags)),
72 prefetcher(p->prefetcher),
73 doFastWrites(true),
74 prefetchOnAccess(p->prefetch_on_access)
75 {
76 tempBlock = new BlkType();
77 tempBlock->data = new uint8_t[blkSize];
78
79 cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
80 "CpuSidePort");
81 memSidePort = new MemSidePort(p->name + ".mem_side", this,
82 "MemSidePort");
83
84 tags->setCache(this);
85 if (prefetcher)
86 prefetcher->setCache(this);
87 }
88
89 template<class TagStore>
90 Cache<TagStore>::~Cache()
91 {
92 delete [] tempBlock->data;
93 delete tempBlock;
94
95 delete cpuSidePort;
96 delete memSidePort;
97 }
98
99 template<class TagStore>
100 void
101 Cache<TagStore>::regStats()
102 {
103 BaseCache::regStats();
104 }
105
106 template<class TagStore>
107 void
108 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
109 {
110 assert(pkt->isRequest());
111
112 uint64_t overwrite_val;
113 bool overwrite_mem;
114 uint64_t condition_val64;
115 uint32_t condition_val32;
116
117 int offset = tags->extractBlkOffset(pkt->getAddr());
118 uint8_t *blk_data = blk->data + offset;
119
120 assert(sizeof(uint64_t) >= pkt->getSize());
121
122 overwrite_mem = true;
123 // keep a copy of our possible write value, and copy what is at the
124 // memory address into the packet
125 pkt->writeData((uint8_t *)&overwrite_val);
126 pkt->setData(blk_data);
127
128 if (pkt->req->isCondSwap()) {
129 if (pkt->getSize() == sizeof(uint64_t)) {
130 condition_val64 = pkt->req->getExtraData();
131 overwrite_mem = !std::memcmp(&condition_val64, blk_data,
132 sizeof(uint64_t));
133 } else if (pkt->getSize() == sizeof(uint32_t)) {
134 condition_val32 = (uint32_t)pkt->req->getExtraData();
135 overwrite_mem = !std::memcmp(&condition_val32, blk_data,
136 sizeof(uint32_t));
137 } else
138 panic("Invalid size for conditional read/write\n");
139 }
140
141 if (overwrite_mem) {
142 std::memcpy(blk_data, &overwrite_val, pkt->getSize());
143 blk->status |= BlkDirty;
144 }
145 }
146
147
148 template<class TagStore>
149 void
150 Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
151 bool deferred_response,
152 bool pending_downgrade)
153 {
154 assert(pkt->isRequest());
155
156 assert(blk && blk->isValid());
157 // Occasionally this is not true... if we are a lower-level cache
158 // satisfying a string of Read and ReadEx requests from
159 // upper-level caches, a Read will mark the block as shared but we
160 // can satisfy a following ReadEx anyway since we can rely on the
161 // Read requester(s) to have buffered the ReadEx snoop and to
162 // invalidate their blocks after receiving them.
163 // assert(!pkt->needsExclusive() || blk->isWritable());
164 assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
165
166 // Check RMW operations first since both isRead() and
167 // isWrite() will be true for them
168 if (pkt->cmd == MemCmd::SwapReq) {
169 cmpAndSwap(blk, pkt);
170 } else if (pkt->isWrite() &&
171 (!pkt->isWriteInvalidate() || isTopLevel)) {
172 assert(blk->isWritable());
173 // Write or WriteInvalidate at the first cache with block in Exclusive
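// checkWrite() enforces the load-locked/store-conditional rules, so
// the data is only copied into the block if this (possibly
// conditional) store is actually allowed to proceed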
174 if (blk->checkWrite(pkt)) {
175 pkt->writeDataToBlock(blk->data, blkSize);
176 }
177 // Always mark the line as dirty even if we are a failed
178 // StoreCond so we supply data to any snoops that have
179 // appended themselves to this cache before knowing the store
180 // will fail.
181 blk->status |= BlkDirty;
182 DPRINTF(Cache, "%s for %s address %x size %d (write)\n", __func__,
183 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
184 } else if (pkt->isRead()) {
185 if (pkt->isLLSC()) {
186 blk->trackLoadLocked(pkt);
187 }
188 pkt->setDataFromBlock(blk->data, blkSize);
189 if (pkt->getSize() == blkSize) {
190 // special handling for coherent block requests from
191 // upper-level caches
192 if (pkt->needsExclusive()) {
193 // if we have a dirty copy, make sure the recipient
194 // keeps it marked dirty
195 if (blk->isDirty()) {
196 pkt->assertMemInhibit();
197 }
198 // on ReadExReq we give up our copy unconditionally
199 if (blk != tempBlock)
200 tags->invalidate(blk);
201 blk->invalidate();
202 } else if (blk->isWritable() && !pending_downgrade
203 && !pkt->sharedAsserted() && !pkt->req->isInstFetch()) {
204 // we can give the requester an exclusive copy (by not
205 // asserting shared line) on a read request if:
206 // - we have an exclusive copy at this level (& below)
207 // - we don't have a pending snoop from below
208 // signaling another read request
209 // - no other cache above has a copy (otherwise it
210 // would have asserted shared line on request)
211 // - we are not satisfying an instruction fetch (this
212 // prevents dirty data in the i-cache)
213
214 if (blk->isDirty()) {
215 // special considerations if we're owner:
216 if (!deferred_response && !isTopLevel) {
217 // if we are responding immediately and can
218 // signal that we're transferring ownership
219 // along with exclusivity, do so
220 pkt->assertMemInhibit();
221 blk->status &= ~BlkDirty;
222 } else {
223 // if we're responding after our own miss,
224 // there's a window where the recipient didn't
225 // know it was getting ownership and may not
226 // have responded to snoops correctly, so we
227 // can't pass off ownership *or* exclusivity
228 pkt->assertShared();
229 }
230 }
231 } else {
232 // otherwise only respond with a shared copy
233 pkt->assertShared();
234 }
235 }
236 } else {
237 // Upgrade or WriteInvalidate at a cache other than the one that received it.
238 // Since we have it Exclusively (E or M), we ack then invalidate.
239 assert(pkt->isUpgrade() ||
240 (pkt->isWriteInvalidate() && !isTopLevel));
241 assert(blk != tempBlock);
242 tags->invalidate(blk);
243 blk->invalidate();
244 DPRINTF(Cache, "%s for %s address %x size %d (invalidation)\n",
245 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
246 }
247 }
248
249
250 /////////////////////////////////////////////////////
251 //
252 // MSHR helper functions
253 //
254 /////////////////////////////////////////////////////
255
256
257 template<class TagStore>
258 void
259 Cache<TagStore>::markInService(MSHR *mshr, bool pending_dirty_resp)
260 {
261 markInServiceInternal(mshr, pending_dirty_resp);
262 #if 0
263 if (mshr->originalCmd == MemCmd::HardPFReq) {
264 DPRINTF(HWPrefetch, "Marking a HW_PF in service\n");
265 //Also clear pending if need be
266 if (!prefetcher->havePending())
267 {
268 deassertMemSideBusRequest(Request_PF);
269 }
270 }
271 #endif
272 }
273
274
275 template<class TagStore>
276 void
277 Cache<TagStore>::squash(int threadNum)
278 {
279 bool unblock = false;
280 BlockedCause cause = NUM_BLOCKED_CAUSES;
281
282 if (noTargetMSHR && noTargetMSHR->threadNum == threadNum) {
283 noTargetMSHR = NULL;
284 unblock = true;
285 cause = Blocked_NoTargets;
286 }
287 if (mshrQueue.isFull()) {
288 unblock = true;
289 cause = Blocked_NoMSHRs;
290 }
291 mshrQueue.squash(threadNum);
292 if (unblock && !mshrQueue.isFull()) {
293 clearBlocked(cause);
294 }
295 }
296
297 /////////////////////////////////////////////////////
298 //
299 // Access path: requests coming in from the CPU side
300 //
301 /////////////////////////////////////////////////////
302
303 template<class TagStore>
304 bool
305 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
306 Cycles &lat, PacketList &writebacks)
307 {
308 // sanity check
309 assert(pkt->isRequest());
310
311 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
312 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
313 if (pkt->req->isUncacheable()) {
314 uncacheableFlush(pkt);
315 blk = NULL;
316 // lookupLatency is the latency in case the request is uncacheable.
317 lat = lookupLatency;
318 return false;
319 }
320
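// pass the context id (if any) along with the access so the tags can
// attribute the block to the requesting context; -1 means unknown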
321 int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
322 // Here lat is passed by reference to the accessBlock() function,
323 // which can modify its value.
324 blk = tags->accessBlock(pkt->getAddr(), pkt->isSecure(), lat, id);
325
326 DPRINTF(Cache, "%s%s %x (%s) %s\n", pkt->cmdString(),
327 pkt->req->isInstFetch() ? " (ifetch)" : "",
328 pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
329 blk ? "hit " + blk->print() : "miss");
330
331 // Writeback handling is a special case. We can write the block into
332 // the cache without having a writeable copy (or any copy at all).
333 if (pkt->cmd == MemCmd::Writeback) {
334 assert(blkSize == pkt->getSize());
335 if (blk == NULL) {
336 // need to do a replacement
337 blk = allocateBlock(pkt->getAddr(), pkt->isSecure(), writebacks);
338 if (blk == NULL) {
339 // no replaceable block available: give up, fwd to next level.
340 incMissCount(pkt);
341 return false;
342 }
343 tags->insertBlock(pkt, blk);
344
345 blk->status = (BlkValid | BlkReadable);
346 if (pkt->isSecure()) {
347 blk->status |= BlkSecure;
348 }
349 }
350 blk->status |= BlkDirty;
351 if (pkt->isSupplyExclusive()) {
352 blk->status |= BlkWritable;
353 }
354 // nothing else to do; writeback doesn't expect response
355 assert(!pkt->needsResponse());
356 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
357 DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
358 incHitCount(pkt);
359 return true;
360 } else if ((blk != NULL) &&
361 (pkt->needsExclusive() ? blk->isWritable()
362 : blk->isReadable())) {
363 // OK to satisfy access
364 incHitCount(pkt);
365 satisfyCpuSideRequest(pkt, blk);
366 return true;
367 }
368
369 // Can't satisfy access normally... either no block (blk == NULL)
370 // or have block but need exclusive & only have shared.
371
372 incMissCount(pkt);
373
374 if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
375 // complete miss on store conditional... just give up now
376 pkt->req->setExtraData(0);
377 return true;
378 }
379
380 return false;
381 }
382
383
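// Marker sender state attached to snoops that this cache forwards
// upward on the CPU side, so that the matching responses can be
// recognized in recvTimingSnoopResp() when they come back down.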
384 class ForwardResponseRecord : public Packet::SenderState
385 {
386 public:
387
388 ForwardResponseRecord() {}
389 };
390
391 template<class TagStore>
392 void
393 Cache<TagStore>::recvTimingSnoopResp(PacketPtr pkt)
394 {
395 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
396 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
397
398 assert(pkt->isResponse());
399
400 // must be cache-to-cache response from upper to lower level
401 ForwardResponseRecord *rec =
402 dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
403 assert(!system->bypassCaches());
404
405 if (rec == NULL) {
406 // @todo What guarantee do we have that this HardPFResp is
407 // actually for this cache, and not a cache closer to the
408 // memory?
409 assert(pkt->cmd == MemCmd::HardPFResp);
410 // Check if it's a prefetch response and handle it. We shouldn't
411 // get any other kinds of responses without FRRs.
412 DPRINTF(Cache, "Got prefetch response from above for addr %#x (%s)\n",
413 pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
414 recvTimingResp(pkt);
415 return;
416 }
417
418 pkt->popSenderState();
419 delete rec;
420 // forwardLatency is set here because there is a response from an
421 // upper level cache.
422 // To pay for the delay that occurs if the packet comes from the bus,
423 // we also charge headerDelay.
424 Tick snoop_resp_time = clockEdge(forwardLatency) + pkt->headerDelay;
425 // Reset the timing of the packet.
426 pkt->headerDelay = pkt->payloadDelay = 0;
427 memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
428 }
429
430 template<class TagStore>
431 void
432 Cache<TagStore>::promoteWholeLineWrites(PacketPtr pkt)
433 {
434 // Cache line clearing instructions
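// (a whole-line, block-aligned store never needs the old contents of
// the line, so it can safely be promoted to a WriteInvalidate and
// avoid fetching the block first)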
435 if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
436 (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
437 pkt->cmd = MemCmd::WriteInvalidateReq;
438 DPRINTF(Cache, "packet promoted from Write to WriteInvalidate\n");
439 assert(isTopLevel); // should only happen at L1 or I/O cache
440 }
441 }
442
443 template<class TagStore>
444 bool
445 Cache<TagStore>::recvTimingReq(PacketPtr pkt)
446 {
447 DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());
448 //@todo Add back in MemDebug Calls
449 // MemDebug::cacheAccess(pkt);
450
451
452 /// @todo temporary hack to deal with memory corruption issue until
453 /// 4-phase transactions are complete
454 for (int x = 0; x < pendingDelete.size(); x++)
455 delete pendingDelete[x];
456 pendingDelete.clear();
457
458 assert(pkt->isRequest());
459
460 // Just forward the packet if caches are disabled.
461 if (system->bypassCaches()) {
462 // @todo This should really enqueue the packet rather than send it directly
463 bool M5_VAR_USED success = memSidePort->sendTimingReq(pkt);
464 assert(success);
465 return true;
466 }
467
468 promoteWholeLineWrites(pkt);
469
470 if (pkt->memInhibitAsserted()) {
471 // a cache above us (but not where the packet came from) is
472 // responding to the request
473 DPRINTF(Cache, "mem inhibited on 0x%x (%s): not responding\n",
474 pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
475 assert(!pkt->req->isUncacheable());
476
477 // if the packet needs exclusive, and the cache that has
478 // promised to respond (setting the inhibit flag) is not
479 // providing exclusive (it is in O vs M state), we know that
480 // there may be other shared copies in the system; go out and
481 // invalidate them all
482 if (pkt->needsExclusive() && !pkt->isSupplyExclusive()) {
483 // create a downstream express snoop with cleared packet
484 // flags, there is no need to allocate any data as the
485 // packet is merely used to co-ordinate state transitions
486 Packet *snoop_pkt = new Packet(pkt, true, false);
487
488 // also reset the bus time that the original packet has
489 // not yet paid for
490 snoop_pkt->headerDelay = snoop_pkt->payloadDelay = 0;
491
492 // make this an instantaneous express snoop, and let the
493 // other caches in the system know that the packet is
494 // inhibited, because we have found the authoritative copy
495 // (O) that will supply the right data
496 snoop_pkt->setExpressSnoop();
497 snoop_pkt->assertMemInhibit();
498
499 // this express snoop travels towards the memory, and at
500 // every crossbar it is snooped upwards thus reaching
501 // every cache in the system
502 bool M5_VAR_USED success = memSidePort->sendTimingReq(snoop_pkt);
503 // express snoops always succeed
504 assert(success);
505
506 // main memory will delete the packet
507 }
508
509 /// @todo nominally we should just delete the packet here,
510 /// however, until 4-phase stuff we can't because sending
511 /// cache is still relying on it
512 pendingDelete.push_back(pkt);
513
514 // no need to take any action in this particular cache as the
515 // caches along the path to memory are allowed to keep lines
516 // in a shared state, and a cache above us already committed
517 // to responding
518 return true;
519 }
520
521 if (pkt->req->isUncacheable()) {
522 uncacheableFlush(pkt);
523
524 // writes go in write buffer, reads use MSHR,
525 // prefetches are acknowledged (responded to) and dropped
526 if (pkt->cmd.isPrefetch()) {
527 // prefetching (cache loading) uncacheable data is nonsensical
528 pkt->makeTimingResponse();
529 std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
530 // We use lookupLatency here because the request is uncacheable.
531 // We also pay for headerDelay, which accounts for bus latencies if
532 // the packet comes from the bus.
533 Tick time = clockEdge(lookupLatency) + pkt->headerDelay;
534 // Reset the timing of the packet.
535 pkt->headerDelay = pkt->payloadDelay = 0;
536 cpuSidePort->schedTimingResp(pkt, time);
537 return true;
538 } else if (pkt->isWrite() && !pkt->isRead()) {
539 // We also pay for headerDelay, which accounts for bus latencies if
540 // the packet comes from the bus.
541 Tick allocate_wr_buffer_time = clockEdge(forwardLatency) +
542 pkt->headerDelay;
543 // Reset the timing of the packet.
544 pkt->headerDelay = pkt->payloadDelay = 0;
545 allocateWriteBuffer(pkt, allocate_wr_buffer_time, true);
546 } else {
547 // We use forwardLatency here because this is an uncacheable
548 // memory read, allocated to the MSHR queue (we assume it takes
549 // the same time as forwarding to the WriteBuffer). It specifies
550 // the latency to allocate an internal buffer and to schedule an
551 // event to the queued port.
552 // We also pay for headerDelay, which accounts for bus latencies
553 // if the packet comes from the bus.
554 Tick allocate_rd_buffer_time = clockEdge(forwardLatency) +
555 pkt->headerDelay;
556 // Reset the timing of the packet.
557 pkt->headerDelay = pkt->payloadDelay = 0;
558 allocateUncachedReadBuffer(pkt, allocate_rd_buffer_time, true);
559 }
560 assert(pkt->needsResponse()); // else we should delete it here??
561 return true;
562 }
563
564 // We use lookupLatency here because it specifies the latency of the
565 // tag lookup performed by the access() call below.
566 Cycles lat = lookupLatency;
567 BlkType *blk = NULL;
568 PacketList writebacks;
569 // Note that lat is passed by reference here. The function access() calls
570 // accessBlock() which can modify lat value.
571 bool satisfied = access(pkt, blk, lat, writebacks);
572 // Here we charge the headerDelay that takes into account the latencies
573 // of the bus, if the packet comes from it.
574 // The latency charged is just lat, i.e. lookupLatency possibly
575 // modified by the access() function.
576 // In case of a hit we are neglecting response latency.
577 // In case of a miss we are neglecting forward latency.
578 Tick request_time = clockEdge(lat) + pkt->headerDelay;
579 // Here we consider forward_time, paying for just the forward latency
580 // and also charging the delay added by the xbar.
581 // forward_time is used in allocateWriteBuffer() function, called
582 // in case of writeback.
583 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
584 // Here we reset the timing of the packet.
585 pkt->headerDelay = pkt->payloadDelay = 0;
586
587 // track time of availability of next prefetch, if any
588 Tick next_pf_time = MaxTick;
589
590 bool needsResponse = pkt->needsResponse();
591
592 if (satisfied) {
593 // hit (for all other request types)
594
595 if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
596 if (blk)
597 blk->status &= ~BlkHWPrefetched;
598
599 // Don't notify on SWPrefetch
600 if (!pkt->cmd.isSWPrefetch())
601 next_pf_time = prefetcher->notify(pkt);
602 }
603
604 if (needsResponse) {
605 pkt->makeTimingResponse();
606 // @todo: Make someone pay for this
607 pkt->headerDelay = pkt->payloadDelay = 0;
608
609 // In this case we are considering request_time, which takes
610 // into account the delay of the xbar, if any, and just
611 // lat, neglecting responseLatency, modelling the hit latency
612 // as lookupLatency or the value of lat overridden by
613 // access(), which calls the accessBlock() function.
614 cpuSidePort->schedTimingResp(pkt, request_time);
615 } else {
616 /// @todo nominally we should just delete the packet here,
617 /// however, until 4-phase stuff we can't because sending
618 /// cache is still relying on it
619 pendingDelete.push_back(pkt);
620 }
621 } else {
622 // miss
623
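// look for an outstanding miss on the same (block-aligned) address;
// if one exists, this request is coalesced into that MSHR below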
624 Addr blk_addr = blockAlign(pkt->getAddr());
625 MSHR *mshr = mshrQueue.findMatch(blk_addr, pkt->isSecure());
626
627 // Software prefetch handling:
628 // To keep the core from waiting on data it won't look at
629 // anyway, send back a response with dummy data. Miss handling
630 // will continue asynchronously. Unfortunately, the core will
631 // insist upon freeing original Packet/Request, so we have to
632 // create a new pair with a different lifecycle. Note that this
633 // processing happens before any MSHR munging on the behalf of
634 // this request because this new Request will be the one stored
635 // into the MSHRs, not the original.
636 if (pkt->cmd.isSWPrefetch() && isTopLevel) {
637 assert(needsResponse);
638 assert(pkt->req->hasPaddr());
639
640 // There's no reason to add a prefetch as an additional target
641 // to an existing MSHR. If an outstanding request is already
642 // in progress, there is nothing for the prefetch to do.
643 // If this is the case, we don't even create a request at all.
644 PacketPtr pf = nullptr;
645
646 if (!mshr) {
647 // copy the request and create a new SoftPFReq packet
648 RequestPtr req = new Request(pkt->req->getPaddr(),
649 pkt->req->getSize(),
650 pkt->req->getFlags(),
651 pkt->req->masterId());
652 pf = new Packet(req, pkt->cmd);
653 pf->allocate();
654 assert(pf->getAddr() == pkt->getAddr());
655 assert(pf->getSize() == pkt->getSize());
656 }
657
658 pkt->makeTimingResponse();
659 // for debugging, set all the bits in the response data
660 // (also keeps valgrind from complaining when debugging settings
661 // print out instruction results)
662 std::memset(pkt->getPtr<uint8_t>(), 0xFF, pkt->getSize());
663 // request_time is used here, taking into account lat and the delay
664 // charged if the packet comes from the xbar.
665 cpuSidePort->schedTimingResp(pkt, request_time);
666
667 // If an outstanding request is in progress (we found an
668 // MSHR) this is set to null
669 pkt = pf;
670 }
671
672 if (mshr) {
673 /// MSHR hit
674 /// @note writebacks will be checked in getNextMSHR()
675 /// for any conflicting requests to the same block
676
677 //@todo remove hw_pf here
678
679 // Coalesce unless it was a software prefetch (see above).
680 if (pkt) {
681 assert(pkt->req->masterId() < system->maxMasters());
682 mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
683 if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
684 mshr->threadNum = -1;
685 }
686 // We use forward_time here because it is the same when
687 // adding new targets: we have multiple requests for the
688 // same address here. It specifies the latency to allocate an
689 // internal buffer and to schedule an event to the queued
690 // port, and it also takes into account the additional delay
691 // of the xbar.
692 mshr->allocateTarget(pkt, forward_time, order++);
693 if (mshr->getNumTargets() == numTarget) {
694 noTargetMSHR = mshr;
695 setBlocked(Blocked_NoTargets);
696 // need to be careful with this... if this mshr isn't
697 // ready yet (i.e. time > curTick()), we don't want to
698 // move it ahead of mshrs that are ready
699 // mshrQueue.moveToFront(mshr);
700 }
701
702 // We should call the prefetcher regardless of whether the request
703 // is satisfied or not, and regardless of whether the request is in
704 // the MSHR or not. The request could be a ReadReq hit, but still
705 // not satisfied (potentially because of a prior write to the same
706 // cache line). So, even when not satisfied, there is an MSHR
707 // already allocated for this request, and we need to let the
708 // prefetcher know about it.
709 if (prefetcher) {
710 // Don't notify on SWPrefetch
711 if (!pkt->cmd.isSWPrefetch())
712 next_pf_time = prefetcher->notify(pkt);
713 }
714 }
715 } else {
716 // no MSHR
717 assert(pkt->req->masterId() < system->maxMasters());
718 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
719 // always mark as cache fill for now... if we implement
720 // no-write-allocate or bypass accesses this will have to
721 // be changed.
722 if (pkt->cmd == MemCmd::Writeback) {
723 // We use forward_time here because there is a
724 // writeback being forwarded to the WriteBuffer. It
725 // specifies the latency to allocate an internal buffer and to
726 // schedule an event to the queued port and also takes into
727 // account the additional delay of the xbar.
728 allocateWriteBuffer(pkt, forward_time, true);
729 } else {
730 if (blk && blk->isValid()) {
731 // If we have a write miss to a valid block, we
732 // need to mark the block non-readable. Otherwise
733 // if we allow reads while there's an outstanding
734 // write miss, the read could return stale data
735 // out of the cache block... a more aggressive
736 // system could detect the overlap (if any) and
737 // forward data out of the MSHRs, but we don't do
738 // that yet. Note that we do need to leave the
739 // block valid so that it stays in the cache, in
740 // case we get an upgrade response (and hence no
741 // new data) when the write miss completes.
742 // As long as CPUs do proper store/load forwarding
743 // internally, and have a sufficiently weak memory
744 // model, this is probably unnecessary, but at some
745 // point it must have seemed like we needed it...
746 assert(pkt->needsExclusive());
747 assert(!blk->isWritable());
748 blk->status &= ~BlkReadable;
749 }
750 // Here we are using forward_time, modelling the latency of
751 // a miss (outbound) just as forwardLatency, neglecting the
752 // lookupLatency component. In this case this latency value
753 // specifies the latency to allocate an internal buffer and to
754 // schedule an event to the queued port, when a cacheable miss
755 // is forwarded to MSHR queue.
756 // We take also into account the additional delay of the xbar.
757 allocateMissBuffer(pkt, forward_time, true);
758 }
759
760 if (prefetcher) {
761 // Don't notify on SWPrefetch
762 if (!pkt->cmd.isSWPrefetch())
763 next_pf_time = prefetcher->notify(pkt);
764 }
765 }
766 }
767 // Here we consider just forward_time.
768 if (next_pf_time != MaxTick)
769 requestMemSideBus(Request_PF, std::max(clockEdge(forwardLatency),
770 next_pf_time));
771 // copy writebacks to write buffer
772 while (!writebacks.empty()) {
773 PacketPtr wbPkt = writebacks.front();
774 // We use forwardLatency here because we are copying writebacks
775 // to write buffer. It specifies the latency to allocate an internal
776 // buffer and to schedule an event to the queued port.
777 allocateWriteBuffer(wbPkt, forward_time, true);
778 writebacks.pop_front();
779 }
780
781 return true;
782 }
783
784
785 // See comment in cache.hh.
786 template<class TagStore>
787 PacketPtr
788 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
789 bool needsExclusive) const
790 {
791 bool blkValid = blk && blk->isValid();
792
793 if (cpu_pkt->req->isUncacheable()) {
794 //assert(blk == NULL);
795 return NULL;
796 }
797
798 if (!blkValid &&
799 (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
800 // Writebacks that weren't allocated in access() and upgrades
801 // from upper-level caches that missed completely just go
802 // through.
803 return NULL;
804 }
805
806 assert(cpu_pkt->needsResponse());
807
808 MemCmd cmd;
809 // @TODO make useUpgrades a parameter.
810 // Note that ownership protocols require upgrade, otherwise a
811 // write miss on a shared owned block will generate a ReadExcl,
812 // which will clobber the owned copy.
813 const bool useUpgrades = true;
814 if (blkValid && useUpgrades) {
815 // only reason to be here is that blk is shared
816 // (read-only) and we need exclusive
817 assert(needsExclusive);
818 assert(!blk->isWritable());
819 cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
820 } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
821 cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
822 // Even though this SC will fail, we still need to send out the
823 // request and get the data to supply it to other snoopers in the case
824 // where the determination the StoreCond fails is delayed due to
825 // all caches not being on the same local bus.
826 cmd = MemCmd::SCUpgradeFailReq;
827 } else if (cpu_pkt->isWriteInvalidate()) {
828 cmd = cpu_pkt->cmd;
829 } else {
830 // block is invalid
831 cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
832 }
833 PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
834
835 pkt->allocate();
836 DPRINTF(Cache, "%s created %s address %x size %d\n",
837 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
838 return pkt;
839 }
840
841
842 template<class TagStore>
843 Tick
844 Cache<TagStore>::recvAtomic(PacketPtr pkt)
845 {
846 // We are in atomic mode so we pay just for lookupLatency here.
847 Cycles lat = lookupLatency;
848 // @TODO: make this a parameter
849 bool last_level_cache = false;
850
851 // Forward the request if the system is in cache bypass mode.
852 if (system->bypassCaches())
853 return ticksToCycles(memSidePort->sendAtomic(pkt));
854
855 promoteWholeLineWrites(pkt);
856
857 if (pkt->memInhibitAsserted()) {
858 assert(!pkt->req->isUncacheable());
859 // have to invalidate ourselves and any lower caches even if
860 // upper cache will be responding
861 if (pkt->isInvalidate()) {
862 BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
863 if (blk && blk->isValid()) {
864 tags->invalidate(blk);
865 blk->invalidate();
866 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x (%s):"
867 " invalidating\n",
868 pkt->cmdString(), pkt->getAddr(),
869 pkt->isSecure() ? "s" : "ns");
870 }
871 if (!last_level_cache) {
872 DPRINTF(Cache, "forwarding mem-inhibited %s on 0x%x (%s)\n",
873 pkt->cmdString(), pkt->getAddr(),
874 pkt->isSecure() ? "s" : "ns");
875 lat += ticksToCycles(memSidePort->sendAtomic(pkt));
876 }
877 } else {
878 DPRINTF(Cache, "rcvd mem-inhibited %s on 0x%x: not responding\n",
879 pkt->cmdString(), pkt->getAddr());
880 }
881
882 return lat * clockPeriod();
883 }
884
885 // should assert here that there are no outstanding MSHRs or
886 // writebacks... that would mean that someone used an atomic
887 // access in timing mode
888
889 BlkType *blk = NULL;
890 PacketList writebacks;
891
892 if (!access(pkt, blk, lat, writebacks)) {
893 // MISS
894
895 PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
896
897 bool is_forward = (bus_pkt == NULL);
898
899 if (is_forward) {
900 // just forwarding the same request to the next level
901 // no local cache operation involved
902 bus_pkt = pkt;
903 }
904
905 DPRINTF(Cache, "Sending an atomic %s for %x (%s)\n",
906 bus_pkt->cmdString(), bus_pkt->getAddr(),
907 bus_pkt->isSecure() ? "s" : "ns");
908
909 #if TRACING_ON
910 CacheBlk::State old_state = blk ? blk->status : 0;
911 #endif
912
913 lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));
914
915 DPRINTF(Cache, "Receive response: %s for addr %x (%s) in state %i\n",
916 bus_pkt->cmdString(), bus_pkt->getAddr(),
917 bus_pkt->isSecure() ? "s" : "ns",
918 old_state);
919
920 // If packet was a forward, the response (if any) is already
921 // in place in the bus_pkt == pkt structure, so we don't need
922 // to do anything. Otherwise, use the separate bus_pkt to
923 // generate response to pkt and then delete it.
924 if (!is_forward) {
925 if (pkt->needsResponse()) {
926 assert(bus_pkt->isResponse());
927 if (bus_pkt->isError()) {
928 pkt->makeAtomicResponse();
929 pkt->copyError(bus_pkt);
930 } else if (pkt->isWriteInvalidate()) {
931 // note the use of pkt, not bus_pkt here.
932 if (isTopLevel) {
933 blk = handleFill(pkt, blk, writebacks);
934 satisfyCpuSideRequest(pkt, blk);
935 } else if (blk) {
936 satisfyCpuSideRequest(pkt, blk);
937 }
938 } else if (bus_pkt->isRead() ||
939 bus_pkt->cmd == MemCmd::UpgradeResp) {
940 // we're updating cache state to allow us to
941 // satisfy the upstream request from the cache
942 blk = handleFill(bus_pkt, blk, writebacks);
943 satisfyCpuSideRequest(pkt, blk);
944 } else {
945 // we're satisfying the upstream request without
946 // modifying cache state, e.g., a write-through
947 pkt->makeAtomicResponse();
948 }
949 }
950 delete bus_pkt;
951 }
952 }
953
954 // Note that we don't invoke the prefetcher at all in atomic mode.
955 // It's not clear how to do it properly, particularly for
956 // prefetchers that aggressively generate prefetch candidates and
957 // rely on bandwidth contention to throttle them; these will tend
958 // to pollute the cache in atomic mode since there is no bandwidth
959 // contention. If we ever do want to enable prefetching in atomic
960 // mode, though, this is the place to do it... see timingAccess()
961 // for an example (though we'd want to issue the prefetch(es)
962 // immediately rather than calling requestMemSideBus() as we do
963 // there).
964
965 // Handle writebacks if needed
966 while (!writebacks.empty()){
967 PacketPtr wbPkt = writebacks.front();
968 memSidePort->sendAtomic(wbPkt);
969 writebacks.pop_front();
970 delete wbPkt;
971 }
972
973 if (pkt->needsResponse()) {
974 pkt->makeAtomicResponse();
975 }
976
977 return lat * clockPeriod();
978 }
979
980
981 template<class TagStore>
982 void
983 Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
984 {
985 if (system->bypassCaches()) {
986 // Packets from the memory side are snoop requests and
987 // shouldn't happen in bypass mode.
988 assert(fromCpuSide);
989
990 // The cache should be flushed if we are in cache bypass mode,
991 // so we don't need to check if we need to update anything.
992 memSidePort->sendFunctional(pkt);
993 return;
994 }
995
996 Addr blk_addr = blockAlign(pkt->getAddr());
997 bool is_secure = pkt->isSecure();
998 BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure);
999 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
1000
1001 pkt->pushLabel(name());
1002
1003 CacheBlkPrintWrapper cbpw(blk);
1004
1005 // Note that just because an L2/L3 has valid data doesn't mean an
1006 // L1 doesn't have a more up-to-date modified copy that still
1007 // needs to be found. As a result we always update the request if
1008 // we have it, but only declare it satisfied if we are the owner.
1009
1010 // see if we have data at all (owned or otherwise)
1011 bool have_data = blk && blk->isValid()
1012 && pkt->checkFunctional(&cbpw, blk_addr, is_secure, blkSize,
1013 blk->data);
1014
1015 // data we have is dirty if marked as such or if valid & ownership
1016 // pending due to outstanding UpgradeReq
1017 bool have_dirty =
1018 have_data && (blk->isDirty() ||
1019 (mshr && mshr->inService && mshr->isPendingDirty()));
1020
1021 bool done = have_dirty
1022 || cpuSidePort->checkFunctional(pkt)
1023 || mshrQueue.checkFunctional(pkt, blk_addr)
1024 || writeBuffer.checkFunctional(pkt, blk_addr)
1025 || memSidePort->checkFunctional(pkt);
1026
1027 DPRINTF(Cache, "functional %s %x (%s) %s%s%s\n",
1028 pkt->cmdString(), pkt->getAddr(), is_secure ? "s" : "ns",
1029 (blk && blk->isValid()) ? "valid " : "",
1030 have_data ? "data " : "", done ? "done " : "");
1031
1032 // We're leaving the cache, so pop cache->name() label
1033 pkt->popLabel();
1034
1035 if (done) {
1036 pkt->makeResponse();
1037 } else {
1038 // if it came as a request from the CPU side then make sure it
1039 // continues towards the memory side
1040 if (fromCpuSide) {
1041 memSidePort->sendFunctional(pkt);
1042 } else if (forwardSnoops && cpuSidePort->isSnooping()) {
1043 // if it came from the memory side, it must be a snoop request
1044 // and we should only forward it if we are forwarding snoops
1045 cpuSidePort->sendFunctionalSnoop(pkt);
1046 }
1047 }
1048 }
1049
1050
1051 /////////////////////////////////////////////////////
1052 //
1053 // Response handling: responses from the memory side
1054 //
1055 /////////////////////////////////////////////////////
1056
1057
1058 template<class TagStore>
1059 void
1060 Cache<TagStore>::recvTimingResp(PacketPtr pkt)
1061 {
1062 assert(pkt->isResponse());
1063
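// the sender state of a response arriving from the memory side is
// the MSHR that tracks the original miss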
1064 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
1065 bool is_error = pkt->isError();
1066
1067 assert(mshr);
1068
1069 if (is_error) {
1070 DPRINTF(Cache, "Cache received packet with error for address %x (%s), "
1071 "cmd: %s\n", pkt->getAddr(), pkt->isSecure() ? "s" : "ns",
1072 pkt->cmdString());
1073 }
1074
1075 DPRINTF(Cache, "Handling response to %s for address %x (%s)\n",
1076 pkt->cmdString(), pkt->getAddr(), pkt->isSecure() ? "s" : "ns");
1077
1078 MSHRQueue *mq = mshr->queue;
1079 bool wasFull = mq->isFull();
1080
1081 if (mshr == noTargetMSHR) {
1082 // we always clear at least one target
1083 clearBlocked(Blocked_NoTargets);
1084 noTargetMSHR = NULL;
1085 }
1086
1087 // Initial target is used just for stats
1088 MSHR::Target *initial_tgt = mshr->getTarget();
1089 BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1090 int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
1091 Tick miss_latency = curTick() - initial_tgt->recvTime;
1092 PacketList writebacks;
1093 // We need forward_time here because we have a call to
1094 // allocateWriteBuffer() that needs this parameter to specify the
1095 // time to request the bus. In this case we use the forward latency
1096 // because there is a writeback. We also pay here for headerDelay,
1097 // which accounts for bus latencies if the packet comes from the
1098 // bus.
1099 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1100
1101 if (pkt->req->isUncacheable()) {
1102 assert(pkt->req->masterId() < system->maxMasters());
1103 mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
1104 miss_latency;
1105 } else {
1106 assert(pkt->req->masterId() < system->maxMasters());
1107 mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
1108 miss_latency;
1109 }
1110
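// the response fills the cache only if this MSHR was not merely
// forwarding the request, and the response either carries data (a
// read) or grants ownership (UpgradeResp)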
1111 bool is_fill = !mshr->isForward &&
1112 (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
1113
1114 if (is_fill && !is_error) {
1115 DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
1116 pkt->getAddr());
1117
1118 // give mshr a chance to do some dirty work
1119 mshr->handleFill(pkt, blk);
1120
1121 blk = handleFill(pkt, blk, writebacks);
1122 assert(blk != NULL);
1123 }
1124
1125 // First offset for critical word first calculations
1126 int initial_offset = 0;
1127
1128 if (mshr->hasTargets()) {
1129 initial_offset = mshr->getTarget()->pkt->getOffset(blkSize);
1130 }
1131
1132 while (mshr->hasTargets()) {
1133 MSHR::Target *target = mshr->getTarget();
1134
1135 switch (target->source) {
1136 case MSHR::Target::FromCPU:
1137 Tick completion_time;
1138 // Here we charge on completion_time the delay of the xbar if the
1139 // packet comes from it, charged on headerDelay.
1140 completion_time = pkt->headerDelay;
1141
1142 // Software prefetch handling for cache closest to core
1143 if (target->pkt->cmd.isSWPrefetch() && isTopLevel) {
1144 // a software prefetch would have already been ack'd immediately
1145 // with dummy data so the core would be able to retire it.
1146 // this request completes right here, so we deallocate it.
1147 delete target->pkt->req;
1148 delete target->pkt;
1149 break; // skip response
1150 }
1151
1152 // unlike the other packet flows, where data is found in other
1153 // caches or memory and brought back, write invalidates always
1154 // have the data right away, so the above check for "is fill?"
1155 // cannot actually be determined until examining the stored MSHR
1156 // state. We "catch up" with that logic here, which is duplicated
1157 // from above.
1158 if (target->pkt->isWriteInvalidate() && isTopLevel) {
1159 assert(!is_error);
1160
1161 // NB: we use the original packet here and not the response!
1162 mshr->handleFill(target->pkt, blk);
1163 blk = handleFill(target->pkt, blk, writebacks);
1164 assert(blk != NULL);
1165
1166 is_fill = true;
1167 }
1168
1169 if (is_fill) {
1170 satisfyCpuSideRequest(target->pkt, blk,
1171 true, mshr->hasPostDowngrade());
1172
1173 // How many bytes past the first request is this one
1174 int transfer_offset =
1175 target->pkt->getOffset(blkSize) - initial_offset;
1176 if (transfer_offset < 0) {
1177 transfer_offset += blkSize;
1178 }
1179
1180 // If this is not the critical word (non-zero offset), add payloadDelay.
1181 // responseLatency is the latency of the return path
1182 // from lower level caches/memory to an upper level cache or
1183 // the core.
1184 completion_time += clockEdge(responseLatency) +
1185 (transfer_offset ? pkt->payloadDelay : 0);
1186
1187 assert(!target->pkt->req->isUncacheable());
1188
1189 assert(target->pkt->req->masterId() < system->maxMasters());
1190 missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] +=
1191 completion_time - target->recvTime;
1192 } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
1193 // failed StoreCond upgrade
1194 assert(target->pkt->cmd == MemCmd::StoreCondReq ||
1195 target->pkt->cmd == MemCmd::StoreCondFailReq ||
1196 target->pkt->cmd == MemCmd::SCUpgradeFailReq);
1197 // responseLatency is the latency of the return path
1198 // from lower level caches/memory to an upper level cache or
1199 // the core.
1200 completion_time += clockEdge(responseLatency) +
1201 pkt->payloadDelay;
1202 target->pkt->req->setExtraData(0);
1203 } else {
1204 // not a cache fill, just forwarding response
1205 // responseLatency is the latency of the return path
1206 // from lower level caches/memory to the core.
1207 completion_time += clockEdge(responseLatency) +
1208 pkt->payloadDelay;
1209 if (pkt->isRead() && !is_error) {
1210 target->pkt->setData(pkt->getConstPtr<uint8_t>());
1211 }
1212 }
1213 target->pkt->makeTimingResponse();
1214 // if this packet is an error copy that to the new packet
1215 if (is_error)
1216 target->pkt->copyError(pkt);
1217 if (target->pkt->cmd == MemCmd::ReadResp &&
1218 (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
1219 // If intermediate cache got ReadRespWithInvalidate,
1220 // propagate that. Response should not have
1221 // isInvalidate() set otherwise.
1222 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
1223 DPRINTF(Cache, "%s updated cmd to %s for address %x\n",
1224 __func__, target->pkt->cmdString(),
1225 target->pkt->getAddr());
1226 }
1227 // Reset the bus additional time as it is now accounted for
1228 target->pkt->headerDelay = target->pkt->payloadDelay = 0;
1229 cpuSidePort->schedTimingResp(target->pkt, completion_time);
1230 break;
1231
1232 case MSHR::Target::FromPrefetcher:
1233 assert(target->pkt->cmd == MemCmd::HardPFReq);
1234 if (blk)
1235 blk->status |= BlkHWPrefetched;
1236 delete target->pkt->req;
1237 delete target->pkt;
1238 break;
1239
1240 case MSHR::Target::FromSnoop:
1241 // I don't believe that a snoop can be in an error state
1242 assert(!is_error);
1243 // response to snoop request
1244 DPRINTF(Cache, "processing deferred snoop...\n");
1245 assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
1246 handleSnoop(target->pkt, blk, true, true,
1247 mshr->hasPostInvalidate());
1248 break;
1249
1250 default:
1251 panic("Illegal target->source enum %d\n", target->source);
1252 }
1253
1254 mshr->popTarget();
1255 }
1256
1257 if (blk && blk->isValid()) {
1258 if ((pkt->isInvalidate() || mshr->hasPostInvalidate()) &&
1259 (!pkt->isWriteInvalidate() || !isTopLevel)) {
1260 assert(blk != tempBlock);
1261 tags->invalidate(blk);
1262 blk->invalidate();
1263 } else if (mshr->hasPostDowngrade()) {
1264 blk->status &= ~BlkWritable;
1265 }
1266 }
1267
1268 if (mshr->promoteDeferredTargets()) {
1269 // avoid later read getting stale data while write miss is
1270 // outstanding.. see comment in timingAccess()
1271 if (blk) {
1272 blk->status &= ~BlkReadable;
1273 }
1274 mq = mshr->queue;
1275 mq->markPending(mshr);
1276 requestMemSideBus((RequestCause)mq->index, clockEdge() +
1277 pkt->payloadDelay);
1278 } else {
1279 mq->deallocate(mshr);
1280 if (wasFull && !mq->isFull()) {
1281 clearBlocked((BlockedCause)mq->index);
1282 }
1283
1284 // Request the bus for a prefetch if this deallocation freed enough
1285 // MSHRs for a prefetch to take place
1286 if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
1287 Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
1288 curTick());
1289 if (next_pf_time != MaxTick)
1290 requestMemSideBus(Request_PF, next_pf_time);
1291 }
1292 }
1293 // reset the xbar additional timing as it is now accounted for
1294 pkt->headerDelay = pkt->payloadDelay = 0;
1295
1296 // copy writebacks to write buffer
1297 while (!writebacks.empty()) {
1298 PacketPtr wbPkt = writebacks.front();
1299 allocateWriteBuffer(wbPkt, clockEdge(forwardLatency), true);
1300 writebacks.pop_front();
1301 }
1302 // if we used temp block, clear it out
1303 if (blk == tempBlock) {
1304 if (blk->isDirty()) {
1305 // We use forwardLatency here because we are copying
1306 // writebacks to write buffer. It specifies the latency to
1307 // allocate an internal buffer and to schedule an event to the
1308 // queued port.
1309 allocateWriteBuffer(writebackBlk(blk), forward_time, true);
1310 }
1311 blk->invalidate();
1312 }
1313
1314 DPRINTF(Cache, "Leaving %s with %s for address %x\n", __func__,
1315 pkt->cmdString(), pkt->getAddr());
1316 delete pkt;
1317 }
1318
1319
1320
1321
1322 template<class TagStore>
1323 PacketPtr
1324 Cache<TagStore>::writebackBlk(BlkType *blk)
1325 {
1326 assert(blk && blk->isValid() && blk->isDirty());
1327
1328 writebacks[Request::wbMasterId]++;
1329
1330 Request *writebackReq =
1331 new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
1332 Request::wbMasterId);
1333 if (blk->isSecure())
1334 writebackReq->setFlags(Request::SECURE);
1335
1336 writebackReq->taskId(blk->task_id);
1337 blk->task_id = ContextSwitchTaskId::Unknown;
1338 blk->tickInserted = curTick();
1339
1340 PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback);
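// if we still have write permission, the writeback supplies an
// exclusive (writable) copy of the line to the receiver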
1341 if (blk->isWritable()) {
1342 writeback->setSupplyExclusive();
1343 }
1344 writeback->allocate();
1345 std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
1346
1347 blk->status &= ~BlkDirty;
1348 return writeback;
1349 }
1350
1351 template<class TagStore>
1352 void
1353 Cache<TagStore>::memWriteback()
1354 {
1355 WrappedBlkVisitor visitor(*this, &Cache<TagStore>::writebackVisitor);
1356 tags->forEachBlk(visitor);
1357 }
1358
1359 template<class TagStore>
1360 void
1361 Cache<TagStore>::memInvalidate()
1362 {
1363 WrappedBlkVisitor visitor(*this, &Cache<TagStore>::invalidateVisitor);
1364 tags->forEachBlk(visitor);
1365 }
1366
1367 template<class TagStore>
1368 bool
1369 Cache<TagStore>::isDirty() const
1370 {
1371 CacheBlkIsDirtyVisitor<BlkType> visitor;
1372 tags->forEachBlk(visitor);
1373
1374 return visitor.isDirty();
1375 }
1376
1377 template<class TagStore>
1378 bool
1379 Cache<TagStore>::writebackVisitor(BlkType &blk)
1380 {
1381 if (blk.isDirty()) {
1382 assert(blk.isValid());
1383
1384 Request request(tags->regenerateBlkAddr(blk.tag, blk.set),
1385 blkSize, 0, Request::funcMasterId);
1386 request.taskId(blk.task_id);
1387
1388 Packet packet(&request, MemCmd::WriteReq);
1389 packet.dataStatic(blk.data);
1390
1391 memSidePort->sendFunctional(&packet);
1392
1393 blk.status &= ~BlkDirty;
1394 }
1395
1396 return true;
1397 }
1398
1399 template<class TagStore>
1400 bool
1401 Cache<TagStore>::invalidateVisitor(BlkType &blk)
1402 {
1403
1404 if (blk.isDirty())
1405 warn_once("Invalidating dirty cache lines. Expect things to break.\n");
1406
1407 if (blk.isValid()) {
1408 assert(!blk.isDirty());
1409 tags->invalidate(dynamic_cast< BlkType *>(&blk));
1410 blk.invalidate();
1411 }
1412
1413 return true;
1414 }
1415
1416 template<class TagStore>
1417 void
1418 Cache<TagStore>::uncacheableFlush(PacketPtr pkt)
1419 {
1420 DPRINTF(Cache, "%s%s %x uncacheable\n", pkt->cmdString(),
1421 pkt->req->isInstFetch() ? " (ifetch)" : "",
1422 pkt->getAddr());
1423
1424 if (pkt->req->isClearLL())
1425 tags->clearLocks();
1426
1427 BlkType *blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
1428 if (blk) {
1429 writebackVisitor(*blk);
1430 invalidateVisitor(*blk);
1431 }
1432 }
1433
1434
1435 template<class TagStore>
1436 typename Cache<TagStore>::BlkType*
1437 Cache<TagStore>::allocateBlock(Addr addr, bool is_secure,
1438 PacketList &writebacks)
1439 {
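// ask the tags for a victim frame; it may still hold a valid (and
// possibly dirty) block, which is handled below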
1440 BlkType *blk = tags->findVictim(addr);
1441
1442 if (blk->isValid()) {
1443 Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
1444 MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
1445 if (repl_mshr) {
1446 // must be an outstanding upgrade request
1447 // on a block we're about to replace...
1448 assert(!blk->isWritable() || blk->isDirty());
1449 assert(repl_mshr->needsExclusive());
1450 // too hard to replace block with transient state
1451 // allocation failed, block not inserted
1452 return NULL;
1453 } else {
1454 DPRINTF(Cache, "replacement: replacing %x (%s) with %x (%s): %s\n",
1455 repl_addr, blk->isSecure() ? "s" : "ns",
1456 addr, is_secure ? "s" : "ns",
1457 blk->isDirty() ? "writeback" : "clean");
1458
1459 if (blk->isDirty()) {
1460 // Save writeback packet for handling by caller
1461 writebacks.push_back(writebackBlk(blk));
1462 }
1463 }
1464 }
1465
1466 return blk;
1467 }
1468
1469
1470 // Note that the reason we return a list of writebacks rather than
1471 // inserting them directly in the write buffer is that this function
1472 // is called by both atomic and timing-mode accesses, and in atomic
1473 // mode we don't mess with the write buffer (we just perform the
1474 // writebacks atomically once the original request is complete).
1475 template<class TagStore>
1476 typename Cache<TagStore>::BlkType*
1477 Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
1478 PacketList &writebacks)
1479 {
1480 assert(pkt->isResponse() || pkt->isWriteInvalidate());
1481 Addr addr = pkt->getAddr();
1482 bool is_secure = pkt->isSecure();
1483 #if TRACING_ON
1484 CacheBlk::State old_state = blk ? blk->status : 0;
1485 #endif
1486
1487 if (blk == NULL) {
1488 // better have read new data...
1489 assert(pkt->hasData());
1490
1491 // only read responses and (original) write invalidate req's have data;
1492 // note that we don't write the data here for write invalidate - that
1493 // happens in the subsequent satisfyCpuSideRequest.
1494 assert(pkt->isRead() || pkt->isWriteInvalidate());
1495
1496 // need to do a replacement
1497 blk = allocateBlock(addr, is_secure, writebacks);
1498 if (blk == NULL) {
1499 // No replaceable block... just use temporary storage to
1500 // complete the current request and then get rid of it
1501 assert(!tempBlock->isValid());
1502 blk = tempBlock;
1503 tempBlock->set = tags->extractSet(addr);
1504 tempBlock->tag = tags->extractTag(addr);
1505 // @todo: set security state as well...
1506 DPRINTF(Cache, "using temp block for %x (%s)\n", addr,
1507 is_secure ? "s" : "ns");
1508 } else {
1509 tags->insertBlock(pkt, blk);
1510 }
1511
1512 // we should never be overwriting a valid block
1513 assert(!blk->isValid());
1514 } else {
1515 // existing block... probably an upgrade
1516 assert(blk->tag == tags->extractTag(addr));
1517 // either we're getting new data or the block should already be valid
1518 assert(pkt->hasData() || blk->isValid());
1519 // don't clear block status... if block is already dirty we
1520 // don't want to lose that
1521 }
1522
1523 if (is_secure)
1524 blk->status |= BlkSecure;
1525 blk->status |= BlkValid | BlkReadable;
1526
1527 if (!pkt->sharedAsserted()) {
1528 blk->status |= BlkWritable;
1529 // If we got this via cache-to-cache transfer (i.e., from a
1530 // cache that was an owner) and took away that owner's copy,
1531 // then we need to write it back. Normally this happens
1532 // anyway as a side effect of getting a copy to write it, but
1533 // there are cases (such as failed store conditionals or
1534 // compare-and-swaps) where we'll demand an exclusive copy but
1535 // end up not writing it.
1536 if (pkt->memInhibitAsserted())
1537 blk->status |= BlkDirty;
1538 }
1539
1540 DPRINTF(Cache, "Block addr %x (%s) moving from state %x to %s\n",
1541 addr, is_secure ? "s" : "ns", old_state, blk->print());
1542
1543 // if we got new data, copy it in (checking for a read response
1544 // and a response that has data is the same in the end)
1545 if (pkt->isRead()) {
1546 assert(pkt->hasData());
1547 std::memcpy(blk->data, pkt->getConstPtr<uint8_t>(), blkSize);
1548 }
1549 // We pay for fillLatency here.
1550 blk->whenReady = clockEdge() + fillLatency * clockPeriod() +
1551 pkt->payloadDelay;
1552
1553 return blk;
1554 }
1555
1556
1557 /////////////////////////////////////////////////////
1558 //
1559 // Snoop path: requests coming in from the memory side
1560 //
1561 /////////////////////////////////////////////////////
1562
1563 template<class TagStore>
1564 void
1565 Cache<TagStore>::
1566 doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
1567 bool already_copied, bool pending_inval)
1568 {
1569 // sanity check
1570 assert(req_pkt->isRequest());
1571 assert(req_pkt->needsResponse());
1572
1573 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
1574 req_pkt->cmdString(), req_pkt->getAddr(), req_pkt->getSize());
1575 // timing-mode snoop responses require a new packet, unless we
1576 // already made a copy...
1577 PacketPtr pkt = req_pkt;
1578 if (!already_copied)
1579 // do not clear flags, and allocate space for data if the
1580 // packet needs it (the only packets that carry data are read
1581 // responses)
1582 pkt = new Packet(req_pkt, false, req_pkt->isRead());
1583
1584 assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
1585 pkt->makeTimingResponse();
1586 if (pkt->isRead()) {
1587 pkt->setDataFromBlock(blk_data, blkSize);
1588 }
1589 if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
1590 // Assume we defer a response to a read from a far-away cache
1591 // A, then later defer a ReadExcl from a cache B on the same
1592 // bus as us. We'll assert MemInhibit in both cases, but in
1593 // the latter case MemInhibit will keep the invalidation from
1594 // reaching cache A. This special response tells cache A that
1595 // it gets the block to satisfy its read, but must immediately
1596 // invalidate it.
1597 pkt->cmd = MemCmd::ReadRespWithInvalidate;
1598 }
1599 DPRINTF(Cache, "%s created response: %s address %x size %d\n",
1600 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1601 // Here we consider forward_time, paying for just the forward latency
1602 // and also charging the delay added by the xbar.
1603 // forward_time is used as the send time of the snoop response below.
1604 Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
1605 // Here we reset the timing of the packet.
1606 pkt->headerDelay = pkt->payloadDelay = 0;
1607 memSidePort->schedTimingSnoopResp(pkt, forward_time);
1608 }
1609
1610 template<class TagStore>
1611 void
1612 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
1613 bool is_timing, bool is_deferred,
1614 bool pending_inval)
1615 {
1616 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
1617 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1618 // deferred snoops can only happen in timing mode
1619 assert(!(is_deferred && !is_timing));
1620 // pending_inval only makes sense on deferred snoops
1621 assert(!(pending_inval && !is_deferred));
1622 assert(pkt->isRequest());
1623
1624 // the packet may get modified if we or a forwarded snooper
1625 // responds in atomic mode, so remember a few things about the
1626 // original packet up front
1627 bool invalidate = pkt->isInvalidate();
1628 bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
1629
1630 if (forwardSnoops) {
1631 // first propagate snoop upward to see if anyone above us wants to
1632 // handle it. save & restore packet src since it will get
1633 // rewritten to be relative to cpu-side bus (if any)
1634 bool alreadyResponded = pkt->memInhibitAsserted();
1635 if (is_timing) {
1636 Packet snoopPkt(pkt, true, false); // clear flags, no allocation
1637 snoopPkt.setExpressSnoop();
1638 snoopPkt.pushSenderState(new ForwardResponseRecord());
1639 // the snoop packet does not need to wait any additional
1640 // time
1641 snoopPkt.headerDelay = snoopPkt.payloadDelay = 0;
1642 cpuSidePort->sendTimingSnoopReq(&snoopPkt);
1643 if (snoopPkt.memInhibitAsserted()) {
1644 // cache-to-cache response from some upper cache
1645 assert(!alreadyResponded);
1646 pkt->assertMemInhibit();
1647 } else {
1648 delete snoopPkt.popSenderState();
1649 }
1650 if (snoopPkt.sharedAsserted()) {
1651 pkt->assertShared();
1652 }
1653 // If this request is a prefetch and an
1654 // upper level squashes the prefetch request,
1655                 // make sure to propagate the squash to the requester.
1656 if (snoopPkt.prefetchSquashed()) {
1657 pkt->setPrefetchSquashed();
1658 }
1659 } else {
1660 cpuSidePort->sendAtomicSnoop(pkt);
1661 if (!alreadyResponded && pkt->memInhibitAsserted()) {
1662 // cache-to-cache response from some upper cache:
1663 // forward response to original requester
1664 assert(pkt->isResponse());
1665 }
1666 }
1667 }
1668
1669 if (!blk || !blk->isValid()) {
1670 DPRINTF(Cache, "%s snoop miss for %s address %x size %d\n",
1671 __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1672 return;
1673 } else {
1674 DPRINTF(Cache, "%s snoop hit for %s for address %x size %d, "
1675 "old state is %s\n", __func__, pkt->cmdString(),
1676 pkt->getAddr(), pkt->getSize(), blk->print());
1677 }
1678
1679 // we may end up modifying both the block state and the packet (if
1680 // we respond in atomic mode), so just figure out what to do now
1681 // and then do it later. If we find dirty data while snooping for a
1682 // WriteInvalidate, we don't care, since no merging needs to take place.
1683 // We need the eviction to happen as normal, but the data needn't be
1684     // sent anywhere, nor should the writeback be inhibited at the memory
1685 // controller for any reason.
1686 bool respond = blk->isDirty() && pkt->needsResponse()
1687 && !pkt->isWriteInvalidate();
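     // have_exclusive is true when our copy is writable (exclusive or
     // modified); if we respond below, this lets us pass exclusivity on
     // to the requester via setSupplyExclusive()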
1688 bool have_exclusive = blk->isWritable();
1689
1690     // Squash any prefetches from below that would strip write permissions.
1691     // MemCmd::HardPFReq is only observed by upstream caches. After missing
1692     // above and in its own cache, a new MemCmd::ReadReq is created that
1693     // downstream caches observe.
1694 if (pkt->cmd == MemCmd::HardPFReq) {
1695 DPRINTF(Cache, "Squashing prefetch from lower cache %#x\n",
1696 pkt->getAddr());
1697 pkt->setPrefetchSquashed();
1698 return;
1699 }
1700
1701 if (pkt->isRead() && !invalidate) {
1702 assert(!needs_exclusive);
1703 pkt->assertShared();
1704 int bits_to_clear = BlkWritable;
1705 const bool haveOwnershipState = true; // for now
1706 if (!haveOwnershipState) {
1707 // if we don't support pure ownership (dirty && !writable),
1708 // have to clear dirty bit here, assume memory snarfs data
1709 // on cache-to-cache xfer
1710 bits_to_clear |= BlkDirty;
1711 }
1712 blk->status &= ~bits_to_clear;
1713 }
1714
1715 if (respond) {
1716 // prevent anyone else from responding, cache as well as
1717 // memory, and also prevent any memory from even seeing the
1718 // request (with current inhibited semantics), note that this
1719 // applies both to reads and writes and that for writes it
1720 // works thanks to the fact that we still have dirty data and
1721 // will write it back at a later point
1722 pkt->assertMemInhibit();
1723 if (have_exclusive) {
1724 pkt->setSupplyExclusive();
1725 }
1726 if (is_timing) {
1727 doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
1728 } else {
1729 pkt->makeAtomicResponse();
1730 pkt->setDataFromBlock(blk->data, blkSize);
1731 }
1732 } else if (is_timing && is_deferred) {
1733 // if it's a deferred timing snoop then we've made a copy of
1734 // the packet, and so if we're not using that copy to respond
1735 // then we need to delete it here.
1736 delete pkt;
1737 }
1738
1739 // Do this last in case it deallocates block data or something
1740 // like that
1741 if (invalidate) {
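         // the temporary block is not managed by the tag store, so only
         // tell the tags about blocks they actually own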
1742 if (blk != tempBlock)
1743 tags->invalidate(blk);
1744 blk->invalidate();
1745 }
1746
1747 DPRINTF(Cache, "new state is %s\n", blk->print());
1748 }
1749
1750
1751 template<class TagStore>
1752 void
1753 Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
1754 {
1755 DPRINTF(Cache, "%s for %s address %x size %d\n", __func__,
1756 pkt->cmdString(), pkt->getAddr(), pkt->getSize());
1757
1758 // Snoops shouldn't happen when bypassing caches
1759 assert(!system->bypassCaches());
1760
1761 // check if the packet is for an address range covered by this
1762 // cache, partly to not waste time looking for it, but also to
1763 // ensure that we only forward the snoop upwards if it is within
1764 // our address ranges
1765 bool in_range = false;
1766 for (AddrRangeList::const_iterator r = addrRanges.begin();
1767 r != addrRanges.end(); ++r) {
1768 if (r->contains(pkt->getAddr())) {
1769 in_range = true;
1770 break;
1771 }
1772 }
1773
1774 // Note that some deferred snoops don't have requests, since the
1775 // original access may have already completed
1776 if ((pkt->req && pkt->req->isUncacheable()) ||
1777 pkt->cmd == MemCmd::Writeback || !in_range) {
1778         // Can't get a hit on an uncacheable address
1779         // Revisit this for multi-level coherence
1780 return;
1781 }
1782
1783 bool is_secure = pkt->isSecure();
1784 BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure);
1785
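     // a snoop can also hit an outstanding miss, so check the MSHR queue
     // in addition to the tags (the writeback buffer is checked further
     // below)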
1786 Addr blk_addr = blockAlign(pkt->getAddr());
1787 MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
1788
1789 // Squash any prefetch requests from below on MSHR hits
1790 if (mshr && pkt->cmd == MemCmd::HardPFReq) {
1791 DPRINTF(Cache, "Squashing prefetch from lower cache on mshr hit %#x\n",
1792 pkt->getAddr());
1793 pkt->setPrefetchSquashed();
1794 return;
1795 }
1796
1797 // Let the MSHR itself track the snoop and decide whether we want
1798 // to go ahead and do the regular cache snoop
1799 if (mshr && mshr->handleSnoop(pkt, order++)) {
1800         DPRINTF(Cache, "Deferring snoop on in-service MSHR to blk %x (%s). "
1801 "mshrs: %s\n", blk_addr, is_secure ? "s" : "ns",
1802 mshr->print());
1803
1804 if (mshr->getNumTargets() > numTarget)
1805             warn("allocating bonus target for snoop"); // handle later
1806 return;
1807 }
1808
1809     // We also need to check the writeback buffers and handle those
1810 std::vector<MSHR *> writebacks;
1811 if (writeBuffer.findMatches(blk_addr, is_secure, writebacks)) {
1812 DPRINTF(Cache, "Snoop hit in writeback to addr: %x (%s)\n",
1813 pkt->getAddr(), is_secure ? "s" : "ns");
1814
1815         // Look through the writebacks for any non-uncacheable writes, and use that
1816 if (writebacks.size()) {
1817 // We should only ever find a single match
1818 assert(writebacks.size() == 1);
1819 mshr = writebacks[0];
1820 assert(!mshr->isUncacheable());
1821 assert(mshr->getNumTargets() == 1);
1822 PacketPtr wb_pkt = mshr->getTarget()->pkt;
1823 assert(wb_pkt->cmd == MemCmd::Writeback);
1824
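             // we will supply the snooped data directly from the writeback
             // packet below, so keep memory (and anyone else) from also
             // responding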
1825 assert(!pkt->memInhibitAsserted());
1826 pkt->assertMemInhibit();
1827 if (!pkt->needsExclusive()) {
1828 pkt->assertShared();
1829 // the writeback is no longer the exclusive copy in the system
1830 wb_pkt->clearSupplyExclusive();
1831 } else {
1832 // if we're not asserting the shared line, we need to
1833 // invalidate our copy. we'll do that below as long as
1834 // the packet's invalidate flag is set...
1835 assert(pkt->isInvalidate());
1836 }
1837 doTimingSupplyResponse(pkt, wb_pkt->getConstPtr<uint8_t>(),
1838 false, false);
1839
1840 if (pkt->isInvalidate()) {
1841 // Invalidation trumps our writeback... discard here
1842 markInService(mshr, false);
1843 delete wb_pkt;
1844 }
1845 } // writebacks.size()
1846 }
1847
1848 // If this was a shared writeback, there may still be
1849 // other shared copies above that require invalidation.
1850 // We could be more selective and return here if the
1851 // request is non-exclusive or if the writeback is
1852 // exclusive.
1853 handleSnoop(pkt, blk, true, false, false);
1854 }
1855
1856 template<class TagStore>
1857 bool
1858 Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
1859 {
1860 // Express snoop responses from master to slave, e.g., from L1 to L2
1861 cache->recvTimingSnoopResp(pkt);
1862 return true;
1863 }
1864
1865 template<class TagStore>
1866 Tick
1867 Cache<TagStore>::recvAtomicSnoop(PacketPtr pkt)
1868 {
1869 // Snoops shouldn't happen when bypassing caches
1870 assert(!system->bypassCaches());
1871
1872 if (pkt->req->isUncacheable() || pkt->cmd == MemCmd::Writeback) {
1873 // Can't get a hit on an uncacheable address
1874 // Revisit this for multi level coherence
1875 return 0;
1876 }
1877
1878 BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
1879 handleSnoop(pkt, blk, false, false, false);
1880     // We charge forwardLatency here because the snoop is handled in atomic mode
1881 return forwardLatency * clockPeriod();
1882 }
1883
1884
1885 template<class TagStore>
1886 MSHR *
1887 Cache<TagStore>::getNextMSHR()
1888 {
1889 // Check both MSHR queue and write buffer for potential requests
1890 MSHR *miss_mshr = mshrQueue.getNextMSHR();
1891 MSHR *write_mshr = writeBuffer.getNextMSHR();
1892
1893 // Now figure out which one to send... some cases are easy
1894 if (miss_mshr && !write_mshr) {
1895 return miss_mshr;
1896 }
1897 if (write_mshr && !miss_mshr) {
1898 return write_mshr;
1899 }
1900
1901 if (miss_mshr && write_mshr) {
1902 // We have one of each... normally we favor the miss request
1903 // unless the write buffer is full
1904 if (writeBuffer.isFull() && writeBuffer.inServiceEntries == 0) {
1905 // Write buffer is full, so we'd like to issue a write;
1906 // need to search MSHR queue for conflicting earlier miss.
1907 MSHR *conflict_mshr =
1908 mshrQueue.findPending(write_mshr->addr, write_mshr->size,
1909 write_mshr->isSecure);
1910
1911 if (conflict_mshr && conflict_mshr->order < write_mshr->order) {
1912 // Service misses in order until conflict is cleared.
1913 return conflict_mshr;
1914 }
1915
1916 // No conflicts; issue write
1917 return write_mshr;
1918 }
1919
1920 // Write buffer isn't full, but need to check it for
1921 // conflicting earlier writeback
1922 MSHR *conflict_mshr =
1923 writeBuffer.findPending(miss_mshr->addr, miss_mshr->size,
1924 miss_mshr->isSecure);
1925 if (conflict_mshr) {
1926 // not sure why we don't check order here... it was in the
1927 // original code but commented out.
1928
1929             // The only way this happens is if we are doing a write and
1930             // we didn't have permissions, then subsequently saw a
1931             // writeback (the owned block got evicted). We need to make
1932             // sure to perform the writeback first to preserve the dirty
1933             // data, then we can issue the write.
1934
1935 // should we return write_mshr here instead? I.e. do we
1936 // have to flush writes in order? I don't think so... not
1937 // for Alpha anyway. Maybe for x86?
1938 return conflict_mshr;
1939 }
1940
1941 // No conflicts; issue read
1942 return miss_mshr;
1943 }
1944
1945 // fall through... no pending requests. Try a prefetch.
1946 assert(!miss_mshr && !write_mshr);
1947 if (prefetcher && mshrQueue.canPrefetch()) {
1948 // If we have a miss queue slot, we can try a prefetch
1949 PacketPtr pkt = prefetcher->getPacket();
1950 if (pkt) {
1951 Addr pf_addr = blockAlign(pkt->getAddr());
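             // only issue the prefetch if the line is neither already in
             // the cache nor outstanding in the MSHRs or the write buffer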
1952 if (!tags->findBlock(pf_addr, pkt->isSecure()) &&
1953 !mshrQueue.findMatch(pf_addr, pkt->isSecure()) &&
1954 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
1955 // Update statistic on number of prefetches issued
1956 // (hwpf_mshr_misses)
1957 assert(pkt->req->masterId() < system->maxMasters());
1958 mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
1959 // Don't request bus, since we already have it
1960 return allocateMissBuffer(pkt, curTick(), false);
1961 } else {
1962 // free the request and packet
1963 delete pkt->req;
1964 delete pkt;
1965 }
1966 }
1967 }
1968
1969 return NULL;
1970 }
1971
1972
1973 template<class TagStore>
1974 PacketPtr
1975 Cache<TagStore>::getTimingPacket()
1976 {
1977 MSHR *mshr = getNextMSHR();
1978
1979 if (mshr == NULL) {
1980 return NULL;
1981 }
1982
1983 // use request from 1st target
1984 PacketPtr tgt_pkt = mshr->getTarget()->pkt;
1985 PacketPtr pkt = NULL;
1986
1987 DPRINTF(CachePort, "%s %s for address %x size %d\n", __func__,
1988 tgt_pkt->cmdString(), tgt_pkt->getAddr(), tgt_pkt->getSize());
1989
1990 if (mshr->isForwardNoResponse()) {
1991 // no response expected, just forward packet as it is
1992 assert(tags->findBlock(mshr->addr, mshr->isSecure) == NULL);
1993 pkt = tgt_pkt;
1994 } else {
1995 BlkType *blk = tags->findBlock(mshr->addr, mshr->isSecure);
1996
1997 if (tgt_pkt->cmd == MemCmd::HardPFReq) {
1998 // We need to check the caches above us to verify that
1999 // they don't have a copy of this block in the dirty state
2000 // at the moment. Without this check we could get a stale
2001 // copy from memory that might get used in place of the
2002 // dirty one.
2003 Packet snoop_pkt(tgt_pkt, true, false);
2004 snoop_pkt.setExpressSnoop();
2005 snoop_pkt.senderState = mshr;
2006 cpuSidePort->sendTimingSnoopReq(&snoop_pkt);
2007
2008             // Check to see if the prefetch was squashed by an upper cache
2009             // (to prevent us from grabbing the line), or if a writeback
2010             // arrived between the time the prefetch was placed in the
2011             // MSHRs and when it was selected to be sent; in either case
2012             // the prefetch is dropped below.
2013
2014             // It is important to check memInhibitAsserted before
2015             // prefetchSquashed. If another cache has asserted MEM_INHIBIT,
2016             // it will be sending a response which will arrive at the MSHR
2017             // allocated for this request. Checking prefetchSquashed first
2018             // may result in the MSHR being prematurely deallocated.
2019
2020 if (snoop_pkt.memInhibitAsserted()) {
2021 // If we are getting a non-shared response it is dirty
2022 bool pending_dirty_resp = !snoop_pkt.sharedAsserted();
2023 markInService(mshr, pending_dirty_resp);
2024 DPRINTF(Cache, "Upward snoop of prefetch for addr"
2025 " %#x (%s) hit\n",
2026 tgt_pkt->getAddr(), tgt_pkt->isSecure()? "s": "ns");
2027 return NULL;
2028 }
2029
2030 if (snoop_pkt.prefetchSquashed() || blk != NULL) {
2031 DPRINTF(Cache, "Prefetch squashed by cache. "
2032 "Deallocating mshr target %#x.\n", mshr->addr);
2033
2034 // Deallocate the mshr target
2035 if (mshr->queue->forceDeallocateTarget(mshr)) {
2036                     // Clear the blocked state if this deallocation freed an
2037                     // MSHR when all had previously been utilized
2038 clearBlocked((BlockedCause)(mshr->queue->index));
2039 }
2040 return NULL;
2041 }
2042
2043 }
2044
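         // turn the target packet into the request to send downstream,
         // based on the current block state and whether we need an
         // exclusive copy; a NULL return means this is not a cache block
         // request and must simply be forwarded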
2045 pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
2046
2047 mshr->isForward = (pkt == NULL);
2048
2049 if (mshr->isForward) {
2050 // not a cache block request, but a response is expected
2051 // make copy of current packet to forward, keep current
2052 // copy for response handling
2053 pkt = new Packet(tgt_pkt, false, true);
2054 if (pkt->isWrite()) {
2055 pkt->setData(tgt_pkt->getConstPtr<uint8_t>());
2056 }
2057 }
2058 }
2059
2060 assert(pkt != NULL);
2061 pkt->senderState = mshr;
2062 return pkt;
2063 }
2064
2065
2066 template<class TagStore>
2067 Tick
2068 Cache<TagStore>::nextMSHRReadyTime() const
2069 {
2070 Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
2071 writeBuffer.nextMSHRReadyTime());
2072
2073 // Don't signal prefetch ready time if no MSHRs available
2074     // Will signal once enough MSHRs are deallocated
2075 if (prefetcher && mshrQueue.canPrefetch()) {
2076 nextReady = std::min(nextReady,
2077 prefetcher->nextPrefetchReadyTime());
2078 }
2079
2080 return nextReady;
2081 }
2082
2083 template<class TagStore>
2084 void
2085 Cache<TagStore>::serialize(std::ostream &os)
2086 {
2087 bool dirty(isDirty());
2088
2089 if (dirty) {
2090 warn("*** The cache still contains dirty data. ***\n");
2091 warn(" Make sure to drain the system using the correct flags.\n");
2092 warn(" This checkpoint will not restore correctly and dirty data in "
2093 "the cache will be lost!\n");
2094 }
2095
2096 // Since we don't checkpoint the data in the cache, any dirty data
2097 // will be lost when restoring from a checkpoint of a system that
2098 // wasn't drained properly. Flag the checkpoint as invalid if the
2099 // cache contains dirty data.
2100 bool bad_checkpoint(dirty);
2101 SERIALIZE_SCALAR(bad_checkpoint);
2102 }
2103
2104 template<class TagStore>
2105 void
2106 Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
2107 {
2108 bool bad_checkpoint;
2109 UNSERIALIZE_SCALAR(bad_checkpoint);
2110 if (bad_checkpoint) {
2111 fatal("Restoring from checkpoints with dirty caches is not supported "
2112 "in the classic memory system. Please remove any caches or "
2113 " drain them properly before taking checkpoints.\n");
2114 }
2115 }
2116
2117 ///////////////
2118 //
2119 // CpuSidePort
2120 //
2121 ///////////////
2122
2123 template<class TagStore>
2124 AddrRangeList
2125 Cache<TagStore>::CpuSidePort::getAddrRanges() const
2126 {
2127 return cache->getAddrRanges();
2128 }
2129
2130 template<class TagStore>
2131 bool
2132 Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
2133 {
2134 assert(!cache->system->bypassCaches());
2135
2136 bool success = false;
2137
2138 // always let inhibited requests through, even if blocked,
2139 // ultimately we should check if this is an express snoop, but at
2140 // the moment that flag is only set in the cache itself
2141 if (pkt->memInhibitAsserted()) {
2142 // do not change the current retry state
2143 bool M5_VAR_USED bypass_success = cache->recvTimingReq(pkt);
2144 assert(bypass_success);
2145 return true;
2146 } else if (blocked || mustSendRetry) {
2147 // either already committed to send a retry, or blocked
2148 success = false;
2149 } else {
2150 // pass it on to the cache, and let the cache decide if we
2151 // have to retry or not
2152 success = cache->recvTimingReq(pkt);
2153 }
2154
2155 // remember if we have to retry
2156 mustSendRetry = !success;
2157 return success;
2158 }
2159
2160 template<class TagStore>
2161 Tick
2162 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
2163 {
2164 return cache->recvAtomic(pkt);
2165 }
2166
2167 template<class TagStore>
2168 void
2169 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
2170 {
2171 // functional request
2172 cache->functionalAccess(pkt, true);
2173 }
2174
2175 template<class TagStore>
2176 Cache<TagStore>::
2177 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
2178 const std::string &_label)
2179 : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
2180 {
2181 }
2182
2183 ///////////////
2184 //
2185 // MemSidePort
2186 //
2187 ///////////////
2188
2189 template<class TagStore>
2190 bool
2191 Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
2192 {
2193 cache->recvTimingResp(pkt);
2194 return true;
2195 }
2196
2197 // Express snooping requests to memside port
2198 template<class TagStore>
2199 void
2200 Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
2201 {
2202 // handle snooping requests
2203 cache->recvTimingSnoopReq(pkt);
2204 }
2205
2206 template<class TagStore>
2207 Tick
2208 Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
2209 {
2210 return cache->recvAtomicSnoop(pkt);
2211 }
2212
2213 template<class TagStore>
2214 void
2215 Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
2216 {
2217 // functional snoop (note that in contrast to atomic we don't have
2218 // a specific functionalSnoop method, as they have the same
2219 // behaviour regardless)
2220 cache->functionalAccess(pkt, false);
2221 }
2222
2223 template<class TagStore>
2224 void
2225 Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket()
2226 {
2227 // sanity check
2228 assert(!waitingOnRetry);
2229
2230 // there should never be any deferred request packets in the
2231     // queue, instead we rely on the cache to provide the packets
2232 // from the MSHR queue or write queue
2233 assert(deferredPacketReadyTime() == MaxTick);
2234
2235 // check for request packets (requests & writebacks)
2236 PacketPtr pkt = cache.getTimingPacket();
2237 if (pkt == NULL) {
2238 // can happen if e.g. we attempt a writeback and fail, but
2239 // before the retry, the writeback is eliminated because
2240 // we snoop another cache's ReadEx.
2241 } else {
2242 MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
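         // getTimingPacket stashed the originating MSHR in the packet's
         // sender state so that we can mark it in service once the
         // packet has been sent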
2243 // in most cases getTimingPacket allocates a new packet, and
2244 // we must delete it unless it is successfully sent
2245 bool delete_pkt = !mshr->isForwardNoResponse();
2246
2247 // let our snoop responses go first if there are responses to
2248 // the same addresses we are about to writeback, note that
2249 // this creates a dependency between requests and snoop
2250 // responses, but that should not be a problem since there is
2251 // a chain already and the key is that the snoop responses can
2252 // sink unconditionally
2253 if (snoopRespQueue.hasAddr(pkt->getAddr())) {
2254 DPRINTF(CachePort, "Waiting for snoop response to be sent\n");
2255 Tick when = snoopRespQueue.deferredPacketReadyTime();
2256 schedSendEvent(when);
2257
2258 if (delete_pkt)
2259 delete pkt;
2260
2261 return;
2262 }
2263
2264
2265 waitingOnRetry = !masterPort.sendTimingReq(pkt);
2266
2267 if (waitingOnRetry) {
2268 DPRINTF(CachePort, "now waiting on a retry\n");
2269 if (delete_pkt) {
2270 // we are awaiting a retry, but we
2271 // delete the packet and will be creating a new packet
2272 // when we get the opportunity
2273 delete pkt;
2274 }
2275 // note that we have now masked any requestBus and
2276 // schedSendEvent (we will wait for a retry before
2277 // doing anything), and this is so even if we do not
2278 // care about this packet and might override it before
2279 // it gets retried
2280 } else {
2281 // As part of the call to sendTimingReq the packet is
2282 // forwarded to all neighbouring caches (and any
2283 // caches above them) as a snoop. The packet is also
2284 // sent to any potential cache below as the
2285 // interconnect is not allowed to buffer the
2286 // packet. Thus at this point we know if any of the
2287 // neighbouring, or the downstream cache is
2288 // responding, and if so, if it is with a dirty line
2289 // or not.
2290 bool pending_dirty_resp = !pkt->sharedAsserted() &&
2291 pkt->memInhibitAsserted();
2292
2293 cache.markInService(mshr, pending_dirty_resp);
2294 }
2295 }
2296
2297 // if we succeeded and are not waiting for a retry, schedule the
2298 // next send considering when the next MSHR is ready, note that
2299 // snoop responses have their own packet queue and thus schedule
2300 // their own events
2301 if (!waitingOnRetry) {
2302 schedSendEvent(cache.nextMSHRReadyTime());
2303 }
2304 }
2305
2306 template<class TagStore>
2307 Cache<TagStore>::
2308 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
2309 const std::string &_label)
2310 : BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
2311 _reqQueue(*_cache, *this, _snoopRespQueue, _label),
2312 _snoopRespQueue(*_cache, *this, _label), cache(_cache)
2313 {
2314 }
2315
2316 #endif//__MEM_CACHE_CACHE_IMPL_HH__