Use PacketPtr everywhere
gem5.git / src/mem/cache/base_cache.cc
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "cpu/base.hh"
#include "cpu/smt.hh"
#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/mshr.hh"

using namespace std;

BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                bool _isCpuSide)
    : Port(_name), cache(_cache), isCpuSide(_isCpuSide)
{
    blocked = false;
    waitingOnRetry = false;
    // Start ports at null; if more than one is created we should panic
    //cpuSidePort = NULL;
    //memSidePort = NULL;
}

void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    cache->recvStatusChange(status, isCpuSide);
}

void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
{
    cache->getAddressRanges(resp, snoop, isCpuSide);
}

int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}

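/**
 * Handle an incoming timing packet.  CPU-side invalidates/upgrades
 * (cacheable, no read or write data) are marked SATISFIED and consumed
 * here, requests that arrive while the cache is blocked are refused and
 * a retry is flagged, and everything else is handed to the cache's
 * timing access path.
 */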
bool
BaseCache::CachePort::recvTiming(PacketPtr pkt)
{
    if (isCpuSide
        && !pkt->req->isUncacheable()
        && pkt->isInvalidate()
        && !pkt->isRead() && !pkt->isWrite()) {
        //Upgrade or Invalidate
        //Look into what happens if two slave caches are on the bus
        DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                pkt->getAddr() & (((ULL(1))<<48)-1),
                pkt->getAddr() & ~((Addr)cache->blkSize - 1));

        assert(!(pkt->flags & SATISFIED));
        pkt->flags |= SATISFIED;
        //Invalidates/Upgrades need no response if they get the bus
        return true;
    }

    if (pkt->isRequest() && blocked)
    {
        DPRINTF(Cache, "Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }
    return cache->doTimingAccess(pkt, this, isCpuSide);
}

Tick
BaseCache::CachePort::recvAtomic(PacketPtr pkt)
{
    return cache->doAtomicAccess(pkt, isCpuSide);
}

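/**
 * Service a functional access.  Any packet still queued on the drain
 * list that overlaps the probe has its data reconciled with the probe
 * before the access is forwarded to the cache itself.
 */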
void
BaseCache::CachePort::recvFunctional(PacketPtr pkt)
{
    //Check storage here first
    list<PacketPtr>::iterator i = drainList.begin();
    list<PacketPtr>::iterator end = drainList.end();
    for (; i != end; ++i) {
        PacketPtr target = *i;
        // If the target contains data, and it overlaps the
        // probed request, need to update data
        if (target->intersect(pkt)) {
            uint8_t* pkt_data;
            uint8_t* write_data;
            int data_size;
            if (target->getAddr() < pkt->getAddr()) {
                int offset = pkt->getAddr() - target->getAddr();
                pkt_data = pkt->getPtr<uint8_t>();
                write_data = target->getPtr<uint8_t>() + offset;
                data_size = target->getSize() - offset;
                assert(data_size > 0);
                if (data_size > pkt->getSize())
                    data_size = pkt->getSize();
            } else {
                int offset = target->getAddr() - pkt->getAddr();
                pkt_data = pkt->getPtr<uint8_t>() + offset;
                write_data = target->getPtr<uint8_t>();
                data_size = pkt->getSize() - offset;
                assert(data_size > 0);
                if (data_size > target->getSize())
                    data_size = target->getSize();
            }

            if (pkt->isRead()) {
                // Functional read: pull data from the queued packet
                memcpy(pkt_data, write_data, data_size);
            } else {
                // Functional write: push data into the queued packet
                memcpy(write_data, pkt_data, data_size);
            }
        }
    }
    cache->doFunctionalAccess(pkt, isCpuSide);
}

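/**
 * Called when the peer port can accept another packet.  Queued
 * responses on the drain list are sent first; otherwise the memory-side
 * port retries the pending MSHR request and the CPU-side port retries
 * the pending coherence (CSHR) request.
 */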
void
BaseCache::CachePort::recvRetry()
{
    PacketPtr pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        DPRINTF(CachePort, "%s attempting to send a retry for response\n", name());
        //We have some responses to drain first
        if (sendTiming(drainList.front())) {
            DPRINTF(CachePort, "%s successful in sending a retry for response\n", name());
            drainList.pop_front();
            if (!drainList.empty() ||
                (!isCpuSide && cache->doMasterRequest()) ||
                (isCpuSide && cache->doSlaveRequest())) {

                DPRINTF(CachePort, "%s has more responses/requests\n", name());
                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
                reqCpu->schedule(curTick + 1);
            }
            waitingOnRetry = false;
        }
    }
    else if (!isCpuSide)
    {
        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
        if (!cache->doMasterRequest()) {
            //This can happen if I am the owner of a block and see an upgrade
            //while the block was in my WB Buffers.  I just remove the
            //wb and de-assert the masterRequest
            waitingOnRetry = false;
            return;
        }
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*) pkt->senderState;
        //Copy the packet, it may be modified/destroyed elsewhere
        PacketPtr copyPkt = new Packet(*pkt);
        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
        mshr->pkt = copyPkt;

        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");

        waitingOnRetry = !success;
        if (waitingOnRetry) {
            DPRINTF(CachePort, "%s now waiting on a retry\n", name());
        }

        cache->sendResult(pkt, mshr, success);

        if (success && cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
        }
    }
    else
    {
        assert(cache->doSlaveRequest());
        //We save the packet, no reordering on CSHRs
        pkt = cache->getCoherencePacket();
        MSHR* cshr = (MSHR*) pkt->senderState;
        bool success = sendTiming(pkt);
        cache->sendCoherenceResult(pkt, cshr, success);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
        }
    }
    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL waiting on retry\n", name());
    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
    return;
}

void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    //Clear the retry flag
    mustSendRetry = false;
}

void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        sendRetry();
    }
}

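/**
 * CacheEvent comes in two flavors: without a packet it drives the port
 * (draining queued responses and issuing MSHR/coherence requests);
 * with a packet it turns that packet into a timing response and sends
 * it, appending it to the drain list if the port is busy.
 */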
BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
    this->setFlags(AutoDelete);
    pkt = NULL;
}

BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, PacketPtr _pkt)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), pkt(_pkt)
{
    this->setFlags(AutoDelete);
}

void
BaseCache::CacheEvent::process()
{
    if (!pkt)
    {
        if (cachePort->waitingOnRetry) return;
        //We have some responses to drain first
        if (!cachePort->drainList.empty()) {
            DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
            if (cachePort->sendTiming(cachePort->drainList.front())) {
                DPRINTF(CachePort, "%s drains a response successfully\n", cachePort->name());
                cachePort->drainList.pop_front();
                if (!cachePort->drainList.empty() ||
                    (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
                    (cachePort->isCpuSide && cachePort->cache->doSlaveRequest())) {

                    DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
                    this->schedule(curTick + 1);
                }
            }
            else {
                cachePort->waitingOnRetry = true;
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }
        }
        else if (!cachePort->isCpuSide)
        {   //MSHR
            DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
            if (!cachePort->cache->doMasterRequest()) {
                //This can happen if I am the owner of a block and see an upgrade
                //while the block was in my WB Buffers.  I just remove the
                //wb and de-assert the masterRequest
                return;
            }

            pkt = cachePort->cache->getPacket();
            MSHR* mshr = (MSHR*) pkt->senderState;
            //Copy the packet, it may be modified/destroyed elsewhere
            PacketPtr copyPkt = new Packet(*pkt);
            copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
            mshr->pkt = copyPkt;

            bool success = cachePort->sendTiming(pkt);
            DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "successful" : "unsuccessful");

            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry) {
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }

            cachePort->cache->sendResult(pkt, mshr, success);
            if (success && cachePort->cache->doMasterRequest())
            {
                DPRINTF(CachePort, "%s still more MSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        else
        {
            //CSHR
            assert(cachePort->cache->doSlaveRequest());
            pkt = cachePort->cache->getCoherencePacket();
            MSHR* cshr = (MSHR*) pkt->senderState;
            bool success = cachePort->sendTiming(pkt);
            cachePort->cache->sendCoherenceResult(pkt, cshr, success);
            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry)
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            if (success && cachePort->cache->doSlaveRequest())
            {
                DPRINTF(CachePort, "%s still more CSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        return;
    }
    //Response
    //Know the packet to send
    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
        //Already have a list, just append
        cachePort->drainList.push_back(pkt);
        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to list of drain events
        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }
}

const char *
BaseCache::CacheEvent::description()
{
    return "timing event\n";
}

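/**
 * Return the requested port.  The "", "functional" and "cpu_side" names
 * all resolve to the (lazily created) CPU-side port; "mem_side" creates
 * the single memory-side port and panics if one already exists.
 */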
Port*
BaseCache::getPort(const std::string &if_name, int idx)
{
    if (if_name == "")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "functional")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "cpu_side")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "mem_side")
    {
        if (memSidePort != NULL)
            panic("Already have a mem side for this cache\n");
        memSidePort = new CachePort(name() + "-mem_side_port", this, false);
        return memSidePort;
    }
    else panic("Port name %s unrecognized\n", if_name);
}

void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}

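// Note: regStats() builds a throw-away request/packet below only so that
// cmdIdxToString() can supply the per-command names used by the statistics.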
void
BaseCache::regStats()
{
    Request temp_req((Addr) NULL, 4, 0);
    Packet::Command temp_cmd = Packet::ReadReq;
    Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo Fix command strings so this isn't necessary
    temp_pkt.allocate(); //Temp allocate, all need data

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[Packet::ReadReq] + hits[Packet::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[Packet::SoftPFReq] + hits[Packet::HardPFReq]
        + hits[Packet::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[Packet::ReadReq] + misses[Packet::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[Packet::SoftPFReq] +
        misses[Packet::HardPFReq] + misses[Packet::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[Packet::ReadReq] + missLatency[Packet::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + missLatency[Packet::SoftPFReq] +
        missLatency[Packet::HardPFReq];

    // access formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;
}