mem: use single BadAddr responder per system.
src/mem/cache/base.cc
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "cpu/base.hh"
#include "cpu/smt.hh"
#include "mem/cache/base.hh"
#include "mem/cache/mshr.hh"

using namespace std;

BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                const std::string &_label)
    : SimpleTimingPort(_name, _cache), cache(_cache),
      label(_label), otherPort(NULL),
      blocked(false), mustSendRetry(false)
{
}


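// Construct the cache, sizing the MSHR queue and the write buffer from the
// configuration parameters.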
BaseCache::BaseCache(const Params *p)
    : MemObject(p),
      mshrQueue("MSHRs", p->mshrs, 4, MSHRQueue_MSHRs),
      writeBuffer("write buffer", p->write_buffers, p->mshrs+1000,
                  MSHRQueue_WriteBuffer),
      blkSize(p->block_size),
      hitLatency(p->latency),
      numTarget(p->tgts_per_mshr),
      forwardSnoops(p->forward_snoops),
      blocked(0),
      noTargetMSHR(NULL),
      missCount(p->max_miss_count),
      drainEvent(NULL),
      addrRange(p->addr_range)
{
}

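// When a range change is signalled on this port, propagate it to the port
// on the other side of the cache.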
void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    if (status == Port::RangeChange) {
        otherPort->sendStatusChange(Port::RangeChange);
    }
}


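// Label the packet with this port's name while checking the packets queued
// in this port for a functional match, then restore the packet's label state.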
bool
BaseCache::CachePort::checkFunctional(PacketPtr pkt)
{
    pkt->pushLabel(label);
    bool done = SimpleTimingPort::checkFunctional(pkt);
    pkt->popLabel();
    return done;
}


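// Report the cache's block size as this port's device block size.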
int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}


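// Common retry handling for both cache ports: record that the expected retry
// has arrived by clearing the waitingOnRetry flag.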
bool
BaseCache::CachePort::recvRetryCommon()
{
    assert(waitingOnRetry);
    waitingOnRetry = false;
    return false;
}


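// Mark this port as blocked so that incoming requests are refused until
// clearBlocked() is called.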
void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    // Clear the retry flag
    mustSendRetry = false;
}

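// Unblock the port; if a requester was refused while the port was blocked,
// schedule a retry so it can resend its request.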
void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        SendRetryEvent *ev = new SendRetryEvent(this, true);
        // @TODO: need to find a better time (next bus cycle?)
        schedule(ev, curTick + 1);
    }
}


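// Verify that both ports are connected, then announce the cache's address
// range on the CPU side.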
void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}


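// Register all cache statistics: per-command hit/miss counters, latency
// accumulators, and the derived demand/overall formulas built from them.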
void
BaseCache::regStats()
{
    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    // These macros make it easier to sum the right subset of commands and
    // to change the subset of commands that are considered "demand" vs
    // "non-demand"
#define SUM_DEMAND(s) \
    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::ReadExReq])

    // should writebacks be included here? prior code was inconsistent...
#define SUM_NON_DEMAND(s) \
    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq])

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = SUM_DEMAND(hits);

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + SUM_NON_DEMAND(hits);

    // Miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = SUM_DEMAND(misses);

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + SUM_NON_DEMAND(misses);

    // Miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = SUM_DEMAND(missLatency);

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);

    // access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;


    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;

    writebacks
        .init(maxThreadsPerCPU)
        .name(name() + ".writebacks")
        .desc("number of writebacks")
        .flags(total)
        ;

    // MSHR statistics
    // MSHR hit statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_mshr_hits")
            .desc("number of " + cstr + " MSHR hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandMshrHits
        .name(name() + ".demand_mshr_hits")
        .desc("number of demand (read+write) MSHR hits")
        .flags(total)
        ;
    demandMshrHits = SUM_DEMAND(mshr_hits);

    overallMshrHits
        .name(name() + ".overall_mshr_hits")
        .desc("number of overall MSHR hits")
        .flags(total)
        ;
    overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);

    // MSHR miss statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_mshr_misses")
            .desc("number of " + cstr + " MSHR misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMshrMisses
        .name(name() + ".demand_mshr_misses")
        .desc("number of demand (read+write) MSHR misses")
        .flags(total)
        ;
    demandMshrMisses = SUM_DEMAND(mshr_misses);

    overallMshrMisses
        .name(name() + ".overall_mshr_misses")
        .desc("number of overall MSHR misses")
        .flags(total)
        ;
    overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);

    // MSHR miss latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_miss_latency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_mshr_miss_latency")
            .desc("number of " + cstr + " MSHR miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMshrMissLatency
        .name(name() + ".demand_mshr_miss_latency")
        .desc("number of demand (read+write) MSHR miss cycles")
        .flags(total)
        ;
    demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);

    overallMshrMissLatency
        .name(name() + ".overall_mshr_miss_latency")
        .desc("number of overall MSHR miss cycles")
        .flags(total)
        ;
    overallMshrMissLatency =
        demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);

    // MSHR uncacheable statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_mshr_uncacheable")
            .desc("number of " + cstr + " MSHR uncacheable")
            .flags(total | nozero | nonan)
            ;
    }

    overallMshrUncacheable
        .name(name() + ".overall_mshr_uncacheable_misses")
        .desc("number of overall MSHR uncacheable misses")
        .flags(total)
        ;
    overallMshrUncacheable =
        SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);

    // MSHR uncacheable latency statistics
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshr_uncacheable_lat[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
            .desc("number of " + cstr + " MSHR uncacheable cycles")
            .flags(total | nozero | nonan)
            ;
    }

    overallMshrUncacheableLatency
        .name(name() + ".overall_mshr_uncacheable_latency")
        .desc("number of overall MSHR uncacheable cycles")
        .flags(total)
        ;
    overallMshrUncacheableLatency =
        SUM_DEMAND(mshr_uncacheable_lat) +
        SUM_NON_DEMAND(mshr_uncacheable_lat);

#if 0
    // MSHR access formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrAccesses[access_idx]
            .name(name() + "." + cstr + "_mshr_accesses")
            .desc("number of " + cstr + " mshr accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;
        mshrAccesses[access_idx] =
            mshr_hits[access_idx] + mshr_misses[access_idx]
            + mshr_uncacheable[access_idx];
    }

    demandMshrAccesses
        .name(name() + ".demand_mshr_accesses")
        .desc("number of demand (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    demandMshrAccesses = demandMshrHits + demandMshrMisses;

    overallMshrAccesses
        .name(name() + ".overall_mshr_accesses")
        .desc("number of overall (read+write) mshr accesses")
        .flags(total | nozero | nonan)
        ;
    overallMshrAccesses = overallMshrHits + overallMshrMisses
        + overallMshrUncacheable;
#endif

    // MSHR miss rate formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        mshrMissRate[access_idx]
            .name(name() + "." + cstr + "_mshr_miss_rate")
            .desc("mshr miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        mshrMissRate[access_idx] =
            mshr_misses[access_idx] / accesses[access_idx];
    }

    demandMshrMissRate
        .name(name() + ".demand_mshr_miss_rate")
        .desc("mshr miss rate for demand accesses")
        .flags(total)
        ;
    demandMshrMissRate = demandMshrMisses / demandAccesses;

    overallMshrMissRate
        .name(name() + ".overall_mshr_miss_rate")
        .desc("mshr miss rate for overall accesses")
        .flags(total)
        ;
    overallMshrMissRate = overallMshrMisses / overallAccesses;

    // mshrMiss latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
            .desc("average " + cstr + " mshr miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMshrMissLatency[access_idx] =
            mshr_miss_latency[access_idx] / mshr_misses[access_idx];
    }

    demandAvgMshrMissLatency
        .name(name() + ".demand_avg_mshr_miss_latency")
        .desc("average demand mshr miss latency")
        .flags(total)
        ;
    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;

    overallAvgMshrMissLatency
        .name(name() + ".overall_avg_mshr_miss_latency")
        .desc("average overall mshr miss latency")
        .flags(total)
        ;
    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;

    // mshrUncacheable latency formulas
    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
        MemCmd cmd(access_idx);
        const string &cstr = cmd.toString();

        avgMshrUncacheableLatency[access_idx]
            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
            .desc("average " + cstr + " mshr uncacheable latency")
            .flags(total | nozero | nonan)
            ;

        avgMshrUncacheableLatency[access_idx] =
            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
    }

    overallAvgMshrUncacheableLatency
        .name(name() + ".overall_avg_mshr_uncacheable_latency")
        .desc("average overall mshr uncacheable latency")
        .flags(total)
        ;
    overallAvgMshrUncacheableLatency =
        overallMshrUncacheableLatency / overallMshrUncacheable;

    mshr_cap_events
        .init(maxThreadsPerCPU)
        .name(name() + ".mshr_cap_events")
        .desc("number of times MSHR cap was activated")
        .flags(total)
        ;

    // software prefetching stats
    soft_prefetch_mshr_full
        .init(maxThreadsPerCPU)
        .name(name() + ".soft_prefetch_mshr_full")
        .desc("number of mshr full events for SW prefetching instructions")
        .flags(total)
        ;

    mshr_no_allocate_misses
        .name(name() + ".no_allocate_misses")
        .desc("Number of misses that were no-allocate")
        ;

}

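// Drain both ports; if any packets remain queued, remember the drain event
// and stay in the Draining state until the ports have emptied.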
unsigned int
BaseCache::drain(Event *de)
{
    int count = memSidePort->drain(de) + cpuSidePort->drain(de);

    // Set status
    if (count != 0) {
        drainEvent = de;

        changeState(SimObject::Draining);
        return count;
    }

    changeState(SimObject::Drained);
    return 0;
}