Use fixPacket function everywhere.
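A minimal sketch of the pattern this change applies (taken from CachePort::recvFunctional below): before a functional probe is forwarded, any overlapping packet still queued on the port's drainList is folded into the probe with the shared fixPacket helper instead of hand-rolled copying. Names (drainList, intersect, fixPacket, doFunctionalAccess) are the ones used in this file.

    // Reconcile a functional probe with responses waiting to drain.
    std::list<Packet *>::iterator i = drainList.begin();
    std::list<Packet *>::iterator end = drainList.end();
    for (; i != end; ++i) {
        Packet *target = *i;
        // If a queued response overlaps the probed range, fixPacket()
        // updates the probe's data from it.
        if (target->intersect(pkt))
            fixPacket(pkt, target);
    }
    cache->doFunctionalAccess(pkt, isCpuSide);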
[gem5.git] / src/mem/cache/base_cache.cc
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */

#include "mem/cache/base_cache.hh"
#include "mem/cache/miss/mshr.hh"
#include "mem/packet_impl.hh"
#include "cpu/smt.hh"
#include "cpu/base.hh"

using namespace std;

BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                bool _isCpuSide)
    : Port(_name), cache(_cache), isCpuSide(_isCpuSide)
{
    blocked = false;
    waitingOnRetry = false;
    //Start ports at null; if more than one is created we should panic
    //cpuSidePort = NULL;
    //memSidePort = NULL;
}

void
BaseCache::CachePort::recvStatusChange(Port::Status status)
{
    cache->recvStatusChange(status, isCpuSide);
}

void
BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
                                             AddrRangeList &snoop)
{
    cache->getAddressRanges(resp, snoop, isCpuSide);
}

int
BaseCache::CachePort::deviceBlockSize()
{
    return cache->getBlockSize();
}

bool
BaseCache::CachePort::recvTiming(Packet *pkt)
{
    if (isCpuSide
        && !pkt->req->isUncacheable()
        && pkt->isInvalidate()
        && !pkt->isRead() && !pkt->isWrite()) {
        //Upgrade or Invalidate
        //Look into what happens if two slave caches on bus
        DPRINTF(Cache, "%s %x ? blk_addr: %x\n", pkt->cmdString(),
                pkt->getAddr() & (((ULL(1))<<48)-1),
                pkt->getAddr() & ~((Addr)cache->blkSize - 1));

        assert(!(pkt->flags & SATISFIED));
        pkt->flags |= SATISFIED;
        //Invalidates/Upgrades need no response if they get the bus
        return true;
    }

    if (pkt->isRequest() && blocked)
    {
        DPRINTF(Cache, "Scheduling a retry while blocked\n");
        mustSendRetry = true;
        return false;
    }
    return cache->doTimingAccess(pkt, this, isCpuSide);
}

Tick
BaseCache::CachePort::recvAtomic(Packet *pkt)
{
    return cache->doAtomicAccess(pkt, isCpuSide);
}

void
BaseCache::CachePort::recvFunctional(Packet *pkt)
{
    //Check storage here first
    list<Packet *>::iterator i = drainList.begin();
    list<Packet *>::iterator end = drainList.end();
    for (; i != end; ++i) {
        Packet * target = *i;
        // If the target contains data, and it overlaps the
        // probed request, need to update data
        if (target->intersect(pkt)) {
            fixPacket(pkt, target);
        }
    }
    cache->doFunctionalAccess(pkt, isCpuSide);
}

void
BaseCache::CachePort::recvRetry()
{
    Packet *pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        DPRINTF(CachePort, "%s attempting to send a retry for response\n", name());
        //We have some responses to drain first
        if (sendTiming(drainList.front())) {
            DPRINTF(CachePort, "%s successful in sending a retry for response\n", name());
            drainList.pop_front();
            if (!drainList.empty() ||
                (!isCpuSide && cache->doMasterRequest()) ||
                (isCpuSide && cache->doSlaveRequest())) {

                DPRINTF(CachePort, "%s has more responses/requests\n", name());
                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
                reqCpu->schedule(curTick + 1);
            }
            waitingOnRetry = false;
        }
    }
    else if (!isCpuSide)
    {
        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
        if (!cache->doMasterRequest()) {
            //This can happen if I am the owner of a block and see an upgrade
            //while the block was in my WB Buffers. I just remove the
            //wb and de-assert the masterRequest
            waitingOnRetry = false;
            return;
        }
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*) pkt->senderState;
        //Copy the packet, it may be modified/destroyed elsewhere
        Packet * copyPkt = new Packet(*pkt);
        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
        mshr->pkt = copyPkt;

        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "successful" : "unsuccessful");

        waitingOnRetry = !success;
        if (waitingOnRetry) {
            DPRINTF(CachePort, "%s now waiting on a retry\n", name());
        }

        cache->sendResult(pkt, mshr, success);

        if (success && cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
        }
    }
    else
    {
        assert(cache->doSlaveRequest());
        //We save the packet, no reordering on CSHRs
        pkt = cache->getCoherencePacket();
        MSHR* cshr = (MSHR*)pkt->senderState;
        bool success = sendTiming(pkt);
        cache->sendCoherenceResult(pkt, cshr, success);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
            reqCpu->schedule(curTick + 1);
        }
    }
    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL waiting on retry\n", name());
    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
    return;
}

void
BaseCache::CachePort::setBlocked()
{
    assert(!blocked);
    DPRINTF(Cache, "Cache Blocking\n");
    blocked = true;
    //Clear the retry flag
    mustSendRetry = false;
}

void
BaseCache::CachePort::clearBlocked()
{
    assert(blocked);
    DPRINTF(Cache, "Cache Unblocking\n");
    blocked = false;
    if (mustSendRetry)
    {
        DPRINTF(Cache, "Cache Sending Retry\n");
        mustSendRetry = false;
        sendRetry();
    }
}

BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
{
    this->setFlags(AutoDelete);
    pkt = NULL;
}

BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, Packet *_pkt)
    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), pkt(_pkt)
{
    this->setFlags(AutoDelete);
}

void
BaseCache::CacheEvent::process()
{
    if (!pkt)
    {
        if (cachePort->waitingOnRetry) return;
        //We have some responses to drain first
        if (!cachePort->drainList.empty()) {
            DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
            if (cachePort->sendTiming(cachePort->drainList.front())) {
                DPRINTF(CachePort, "%s drains a response successfully\n", cachePort->name());
                cachePort->drainList.pop_front();
                if (!cachePort->drainList.empty() ||
                    (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
                    (cachePort->isCpuSide && cachePort->cache->doSlaveRequest())) {

                    DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
                    this->schedule(curTick + 1);
                }
            }
            else {
                cachePort->waitingOnRetry = true;
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }
        }
        else if (!cachePort->isCpuSide)
        { //MSHR
            DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
            if (!cachePort->cache->doMasterRequest()) {
                //This can happen if I am the owner of a block and see an upgrade
                //while the block was in my WB Buffers. I just remove the
                //wb and de-assert the masterRequest
                return;
            }

            pkt = cachePort->cache->getPacket();
            MSHR* mshr = (MSHR*) pkt->senderState;
            //Copy the packet, it may be modified/destroyed elsewhere
            Packet * copyPkt = new Packet(*pkt);
            copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
            mshr->pkt = copyPkt;

            bool success = cachePort->sendTiming(pkt);
            DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "successful" : "unsuccessful");

            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry) {
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }

            cachePort->cache->sendResult(pkt, mshr, success);
            if (success && cachePort->cache->doMasterRequest())
            {
                DPRINTF(CachePort, "%s still more MSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        else
        {
            //CSHR
            assert(cachePort->cache->doSlaveRequest());
            pkt = cachePort->cache->getCoherencePacket();
            MSHR* cshr = (MSHR*) pkt->senderState;
            bool success = cachePort->sendTiming(pkt);
            cachePort->cache->sendCoherenceResult(pkt, cshr, success);
            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry)
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            if (success && cachePort->cache->doSlaveRequest())
            {
                DPRINTF(CachePort, "%s still more CSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        return;
    }
    //Response
    //Know the packet to send
    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
        //Already have a list, just append
        cachePort->drainList.push_back(pkt);
        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to list of drain events
        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }
}

const char *
BaseCache::CacheEvent::description()
{
    return "timing event\n";
}

Port*
BaseCache::getPort(const std::string &if_name, int idx)
{
    if (if_name == "")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "functional")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "cpu_side")
    {
        if (cpuSidePort == NULL)
            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
        return cpuSidePort;
    }
    else if (if_name == "mem_side")
    {
        if (memSidePort != NULL)
            panic("Already have a mem side for this cache\n");
        memSidePort = new CachePort(name() + "-mem_side_port", this, false);
        return memSidePort;
    }
    else panic("Port name %s unrecognized\n", if_name);
}

void
BaseCache::init()
{
    if (!cpuSidePort || !memSidePort)
        panic("Cache not hooked up on both sides\n");
    cpuSidePort->sendStatusChange(Port::RangeChange);
}

void
BaseCache::regStats()
{
    Request temp_req((Addr) NULL, 4, 0);
    Packet::Command temp_cmd = Packet::ReadReq;
    Packet temp_pkt(&temp_req, temp_cmd, 0); //@todo Fix command strings so this isn't necessary
    temp_pkt.allocate(); //Temp allocate, all need data

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[Packet::ReadReq] + hits[Packet::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[Packet::SoftPFReq] + hits[Packet::HardPFReq]
        + hits[Packet::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[Packet::ReadReq] + misses[Packet::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[Packet::SoftPFReq] +
        misses[Packet::HardPFReq] + misses[Packet::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[Packet::ReadReq] + missLatency[Packet::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    overallMissLatency = demandMissLatency + missLatency[Packet::SoftPFReq] +
        missLatency[Packet::HardPFReq];

    // access formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses (hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average demand miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of times access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;
}