Only issue responses if we aren't already blocked
[gem5.git] / src / mem / cache / base_cache.cc
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base_cache.hh"
#include "cpu/smt.hh"
#include "cpu/base.hh"

using namespace std;

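// A CachePort connects the cache to either the CPU-side or the
// memory-side bus.  Ports start out unblocked, with no saved
// coherence packet and no retry pending.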
BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                bool _isCpuSide)
    : Port(_name), cache(_cache), isCpuSide(_isCpuSide)
{
    blocked = false;
    cshrRetry = NULL;
    waitingOnRetry = false;
    // Start ports at NULL; if more than one is created we should panic
    //cpuSidePort = NULL;
    //memSidePort = NULL;
}

void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    cache->recvStatusChange(status, isCpuSide);
}

void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
{
    cache->getAddressRanges(resp, snoop, isCpuSide);
}

int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}

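// Handle a timing request from the bus.  CPU-side invalidates and
// upgrades that are neither reads nor writes are marked SATISFIED and
// accepted immediately; any other request that arrives while the cache
// is blocked is refused, and a retry is sent once the cache unblocks.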
bool
BaseCache::CachePort::recvTiming(Packet *pkt)
{
    if (isCpuSide
        && !pkt->req->isUncacheable()
        && pkt->isInvalidate()
        && !pkt->isRead() && !pkt->isWrite()) {
        //Upgrade or Invalidate
        //Look into what happens if two slave caches on bus
        DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                pkt->getAddr() & (((ULL(1))<<48)-1),
                pkt->getAddr() & ~((Addr)cache->blkSize - 1));

        assert(!(pkt->flags & SATISFIED));
        pkt->flags |= SATISFIED;
        //Invalidates/Upgrades need no response if they get the bus
        return true;
    }

    if (pkt->isRequest() && blocked)
    {
        DPRINTF(Cache, "Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }
    return cache->doTimingAccess(pkt, this, isCpuSide);
}

Tick
BaseCache::CachePort::recvAtomic(Packet *pkt)
{
    return cache->doAtomicAccess(pkt, isCpuSide);
}

void
BaseCache::CachePort::recvFunctional(Packet *pkt)
{
    cache->doFunctionalAccess(pkt, isCpuSide);
}

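// The bus is granting us another chance to send.  Queued responses are
// drained first; otherwise the memory side re-requests the next MSHR
// packet and the CPU side resends the saved coherence packet.  If more
// work remains afterwards, a new request event is scheduled for the
// next cycle.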
void
BaseCache::CachePort::recvRetry()
{
    Packet *pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        DPRINTF(CachePort, "%s attempting to send a retry for response\n", name());
        //We have some responses to drain first
        if (sendTiming(drainList.front())) {
            DPRINTF(CachePort, "%s successful in sending a retry for response\n", name());
            drainList.pop_front();
            if (!drainList.empty() ||
                (!isCpuSide && cache->doMasterRequest()) ||
                (isCpuSide && cache->doSlaveRequest())) {

                DPRINTF(CachePort, "%s has more responses/requests\n", name());
                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
                reqCpu->schedule(curTick + 1);
            }
            waitingOnRetry = false;
        }
    }
    else if (!isCpuSide)
    {
        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
        assert(cache->doMasterRequest());
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*)pkt->senderState;
        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");
        cache->sendResult(pkt, mshr, success);
        waitingOnRetry = !success;
        if (success && cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            pkt = NULL;
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
        }
    }
    else
    {
        assert(cshrRetry);
        //pkt = cache->getCoherencePacket();
        //We save the packet, no reordering on CSHRs
        pkt = cshrRetry;
        bool success = sendTiming(pkt);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            //Still more to issue, rerequest in 1 cycle
            pkt = NULL;
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
            cshrRetry = NULL;
        }
    }
    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL waiting on retry\n", name());
    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
    return;
}

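// Mark the port blocked: incoming requests are refused until
// clearBlocked() is called, which also sends any deferred retry.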
void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    //Clear the retry flag
    mustSendRetry = false;
}

void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        sendRetry();
    }
}

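// A CacheEvent built without a packet drives request/drain processing
// on its port; one built with a packet sends that packet back as a
// response.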
BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
    this->setFlags(AutoDelete);
    pkt = NULL;
}

BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, Packet *_pkt)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), pkt(_pkt)
{
    this->setFlags(AutoDelete);
}

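// Request events (no packet) drain queued responses first, then issue
// MSHR or coherence requests.  Response events send directly only if
// nothing is queued and the port is not waiting on a retry; otherwise
// the response is appended to the drain list to be sent later.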
void
BaseCache::CacheEvent::process()
{
    if (!pkt)
    {
        if (cachePort->waitingOnRetry) return;
        //We have some responses to drain first
        if (!cachePort->drainList.empty()) {
            DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
            if (cachePort->sendTiming(cachePort->drainList.front())) {
                DPRINTF(CachePort, "%s drains a response successfully\n", cachePort->name());
                cachePort->drainList.pop_front();
                if (!cachePort->drainList.empty() ||
                    (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
                    (cachePort->isCpuSide && cachePort->cache->doSlaveRequest())) {

                    DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
                    this->schedule(curTick + 1);
                }
            }
            else {
                cachePort->waitingOnRetry = true;
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }
        }
        else if (!cachePort->isCpuSide)
        {
            DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
            assert(cachePort->cache->doMasterRequest());
            //MSHR
            pkt = cachePort->cache->getPacket();
            MSHR* mshr = (MSHR*) pkt->senderState;
            bool success = cachePort->sendTiming(pkt);
            DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "successful" : "unsuccessful");
            cachePort->cache->sendResult(pkt, mshr, success);
            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry)
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            if (success && cachePort->cache->doMasterRequest())
            {
                DPRINTF(CachePort, "%s still more MSHR requests to send\n", cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        else
        {
            assert(cachePort->cache->doSlaveRequest());
            //CSHR
            pkt = cachePort->cache->getCoherencePacket();
            bool success = cachePort->sendTiming(pkt);
            if (!success) {
                //Need to send on a retry
                cachePort->cshrRetry = pkt;
                cachePort->waitingOnRetry = true;
            }
            else if (cachePort->cache->doSlaveRequest())
            {
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        return;
    }
    //Response
    //Know the packet to send
    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
        //Already have a list, just append
        cachePort->drainList.push_back(pkt);
        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to list of drain events
        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }
}

const char *
BaseCache::CacheEvent::description()
{
    return "timing event\n";
}

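// "", "functional", and "cpu_side" all map to the single CPU-side
// port; "mem_side" creates the memory-side port.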
Port*
BaseCache::getPort(const std::string &if_name, int idx)
{
    if (if_name == "" || if_name == "functional" || if_name == "cpu_side")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "mem_side")
    {
        if (memSidePort != NULL)
            panic("Already have a mem side for this cache\n");
        memSidePort = new CachePort(name() + "-mem_side_port", this, false);
        return memSidePort;
    }
    else panic("Port name %s unrecognized\n", if_name);
}

void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}

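// Register statistics.  A throwaway request/packet pair is built here
// only so cmdIdxToString() can provide the command names used in the
// per-command stat names.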
void
BaseCache::regStats()
{
    Request temp_req((Addr) NULL, 4, 0);
    Packet::Command temp_cmd = Packet::ReadReq;
    Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo Fix command strings so this isn't necessary
    temp_pkt.allocate(); //Temp allocate, all need data

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[Packet::ReadReq] + hits[Packet::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[Packet::SoftPFReq] + hits[Packet::HardPFReq]
        + hits[Packet::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[Packet::ReadReq] + misses[Packet::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[Packet::SoftPFReq] +
        misses[Packet::HardPFReq] + misses[Packet::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[Packet::ReadReq] + missLatency[Packet::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + missLatency[Packet::SoftPFReq] +
        missLatency[Packet::HardPFReq];

    // access formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;
}