Fix a bug to handle the fact that a CPU can send Functional accesses while a sendTiming is still pending.
[gem5.git] / src / mem / cache / base_cache.cc
1 /*
2 * Copyright (c) 2003-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 */
30
31 /**
32 * @file
33 * Definition of BaseCache functions.
34 */
35
36 #include "cpu/base.hh"
37 #include "cpu/smt.hh"
38 #include "mem/cache/base_cache.hh"
39 #include "mem/cache/miss/mshr.hh"
40
41 using namespace std;
42
43 BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
44 bool _isCpuSide)
45 : Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
46 {
47 blocked = false;
48 waitingOnRetry = false;
49 //Start ports at null if more than one is created we should panic
50 //cpuSidePort = NULL;
51 //memSidePort = NULL;
52 }
53
54 void
55 BaseCache::CachePort::recvStatusChange(Port::Status status)
56 {
57 cache->recvStatusChange(status, isCpuSide);
58 }
59
60 void
61 BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
62 AddrRangeList &snoop)
63 {
64 cache->getAddressRanges(resp, snoop, isCpuSide);
65 }
66
67 int
68 BaseCache::CachePort::deviceBlockSize()
69 {
70 return cache->getBlockSize();
71 }
72
73 bool
74 BaseCache::CachePort::recvTiming(PacketPtr pkt)
75 {
76 if (isCpuSide
77 && !pkt->req->isUncacheable()
78 && pkt->isInvalidate()
79 && !pkt->isRead() && !pkt->isWrite()) {
80 //Upgrade or Invalidate
81 //Look into what happens if two slave caches on bus
82 DPRINTF(Cache, "%s %x ?\n", pkt->cmdString(), pkt->getAddr());
83
84 assert(!(pkt->flags & SATISFIED));
85 pkt->flags |= SATISFIED;
86 //Invalidates/Upgrades need no response if they get the bus
87 return true;
88 }
89
90 if (pkt->isRequest() && blocked)
91 {
92 DPRINTF(Cache,"Scheduling a retry while blocked\n");
93 mustSendRetry = true;
94 return false;
95 }
96 return cache->doTimingAccess(pkt, this, isCpuSide);
97 }
98
99 Tick
100 BaseCache::CachePort::recvAtomic(PacketPtr pkt)
101 {
102 return cache->doAtomicAccess(pkt, isCpuSide);
103 }
104
105 bool
106 BaseCache::CachePort::checkFunctional(PacketPtr pkt)
107 {
108 //Check storage here first
109 list<PacketPtr>::iterator i = drainList.begin();
110 list<PacketPtr>::iterator iend = drainList.end();
111 bool notDone = true;
112 while (i != iend && notDone) {
113 PacketPtr target = *i;
114 // If the target contains data, and it overlaps the
115 // probed request, need to update data
116 if (target->intersect(pkt)) {
117 DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a drain\n",
118 pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
119 notDone = fixPacket(pkt, target);
120 }
121 i++;
122 }
123 //Also check the response not yet ready to be on the list
124 std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin();
125 std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end();
126
127 while (j != jend && notDone) {
128 PacketPtr target = j->second;
129 // If the target contains data, and it overlaps the
130 // probed request, need to update data
131 if (target->intersect(pkt)) {
132 DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a response\n",
133 pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
134 notDone = fixDelayedResponsePacket(pkt, target);
135 }
136 j++;
137 }
138 return notDone;
139 }
140
141 void
142 BaseCache::CachePort::recvFunctional(PacketPtr pkt)
143 {
144 bool notDone = checkFunctional(pkt);
145 if (notDone)
146 cache->doFunctionalAccess(pkt, isCpuSide);
147 }
148
149 void
150 BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
151 {
152 bool notDone = checkFunctional(pkt);
153 if (notDone)
154 sendFunctional(pkt);
155 }
156
/**
 * The peer has signalled it can accept another packet after a failed
 * sendTiming().  Priority order for what to resend:
 *   1. queued responses waiting to drain (drainList),
 *   2. on the memory side: the next MSHR request,
 *   3. on the CPU side: the next coherence (CSHR) request.
 * Exactly one packet is attempted; if more work remains afterwards a
 * CacheEvent is scheduled one tick later to continue.
 */
void
BaseCache::CachePort::recvRetry()
{
    PacketPtr pkt;
    assert(waitingOnRetry);
    if (!drainList.empty()) {
        DPRINTF(CachePort, "%s attempting to send a retry for response (%i waiting)\n"
                , name(), drainList.size());
        //We have some responses to drain first
        pkt = drainList.front();
        drainList.pop_front();
        if (sendTiming(pkt)) {
            DPRINTF(CachePort, "%s sucessful in sending a retry for"
                    "response (%i still waiting)\n", name(), drainList.size());
            // NOTE: relies on && binding tighter than ||: continue if more
            // responses remain, or this side still has requests pending.
            if (!drainList.empty() ||
                !isCpuSide && cache->doMasterRequest() ||
                isCpuSide && cache->doSlaveRequest()) {

                DPRINTF(CachePort, "%s has more responses/requests\n", name());
                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
                reqCpu->schedule(curTick + 1);
            }
            waitingOnRetry = false;
        }
        else {
            // Send failed again; put the response back at the head so
            // ordering is preserved, and keep waitingOnRetry set.
            drainList.push_front(pkt);
        }
        // Check if we're done draining once this list is empty
        if (drainList.empty())
            cache->checkDrain();
    }
    else if (!isCpuSide)
    {
        // Memory side: retry the next outstanding MSHR request.
        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
        if (!cache->doMasterRequest()) {
            //This can happen if I am the owner of a block and see an upgrade
            //while the block was in my WB Buffers.  I just remove the
            //wb and de-assert the masterRequest
            waitingOnRetry = false;
            return;
        }
        pkt = cache->getPacket();
        MSHR* mshr = (MSHR*) pkt->senderState;
        //Copy the packet, it may be modified/destroyed elsewhere
        PacketPtr copyPkt = new Packet(*pkt);
        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
        mshr->pkt = copyPkt;

        bool success = sendTiming(pkt);
        DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                pkt->getAddr(), success ? "succesful" : "unsuccesful");

        waitingOnRetry = !success;
        if (waitingOnRetry) {
            DPRINTF(CachePort, "%s now waiting on a retry\n", name());
        }

        // Tell the cache how the send went so it can update MSHR state.
        cache->sendResult(pkt, mshr, success);

        if (success && cache->doMasterRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
            reqCpu->schedule(curTick + 1);
        }
    }
    else
    {
        // CPU side: retry the next coherence request.
        assert(cache->doSlaveRequest());
        //pkt = cache->getCoherencePacket();
        //We save the packet, no reordering on CSHRS
        pkt = cache->getCoherencePacket();
        MSHR* cshr = (MSHR*)pkt->senderState;
        bool success = sendTiming(pkt);
        cache->sendCoherenceResult(pkt, cshr, success);
        waitingOnRetry = !success;
        if (success && cache->doSlaveRequest())
        {
            DPRINTF(CachePort, "%s has more requests\n", name());
            //Still more to issue, rerequest in 1 cycle
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
            reqCpu->schedule(curTick + 1);
        }
    }
    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());
    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
    return;
}
246 void
247 BaseCache::CachePort::setBlocked()
248 {
249 assert(!blocked);
250 DPRINTF(Cache, "Cache Blocking\n");
251 blocked = true;
252 //Clear the retry flag
253 mustSendRetry = false;
254 }
255
256 void
257 BaseCache::CachePort::clearBlocked()
258 {
259 assert(blocked);
260 DPRINTF(Cache, "Cache Unblocking\n");
261 blocked = false;
262 if (mustSendRetry)
263 {
264 DPRINTF(Cache, "Cache Sending Retry\n");
265 mustSendRetry = false;
266 sendRetry();
267 }
268 }
269
270 BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, bool _newResponse)
271 : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort),
272 newResponse(_newResponse)
273 {
274 if (!newResponse)
275 this->setFlags(AutoDelete);
276 pkt = NULL;
277 }
278
/**
 * Process a scheduled cache event.
 *
 * Request events (!newResponse) mirror CachePort::recvRetry(): drain a
 * queued response first, else send the next MSHR request (memory side)
 * or coherence request (CPU side), rescheduling one tick later while
 * work remains.  Response events pull the front packet off the
 * time-ordered transmitList, convert it to a timing response, and
 * either send it or park it on the drainList for a retry.
 */
void
BaseCache::CacheEvent::process()
{
    if (!newResponse)
    {
        // If a retry is pending the port will restart us; do nothing now.
        if (cachePort->waitingOnRetry) return;
        //We have some responses to drain first
        if (!cachePort->drainList.empty()) {
            DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
            if (cachePort->sendTiming(cachePort->drainList.front())) {
                DPRINTF(CachePort, "%s drains a response succesfully\n", cachePort->name());
                cachePort->drainList.pop_front();
                // NOTE: relies on && binding tighter than ||.
                if (!cachePort->drainList.empty() ||
                    !cachePort->isCpuSide && cachePort->cache->doMasterRequest() ||
                    cachePort->isCpuSide && cachePort->cache->doSlaveRequest()) {

                    DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
                    this->schedule(curTick + 1);
                }
            }
            else {
                cachePort->waitingOnRetry = true;
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }
        }
        else if (!cachePort->isCpuSide)
        { //MSHR
            DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
            if (!cachePort->cache->doMasterRequest()) {
                //This can happen if I am the owner of a block and see an upgrade
                //while the block was in my WB Buffers.  I just remove the
                //wb and de-assert the masterRequest
                return;
            }

            pkt = cachePort->cache->getPacket();
            MSHR* mshr = (MSHR*) pkt->senderState;
            //Copy the packet, it may be modified/destroyed elsewhere
            PacketPtr copyPkt = new Packet(*pkt);
            copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
            mshr->pkt = copyPkt;

            bool success = cachePort->sendTiming(pkt);
            DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                    pkt->getAddr(), success ? "succesful" : "unsuccesful");

            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry) {
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            }

            cachePort->cache->sendResult(pkt, mshr, success);
            if (success && cachePort->cache->doMasterRequest())
            {
                DPRINTF(CachePort, "%s still more MSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        else
        {
            //CSHR
            assert(cachePort->cache->doSlaveRequest());
            pkt = cachePort->cache->getCoherencePacket();
            MSHR* cshr = (MSHR*) pkt->senderState;
            bool success = cachePort->sendTiming(pkt);
            cachePort->cache->sendCoherenceResult(pkt, cshr, success);
            cachePort->waitingOnRetry = !success;
            if (cachePort->waitingOnRetry)
                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
            if (success && cachePort->cache->doSlaveRequest())
            {
                DPRINTF(CachePort, "%s still more CSHR requests to send\n",
                        cachePort->name());
                //Still more to issue, rerequest in 1 cycle
                pkt = NULL;
                this->schedule(curTick+1);
            }
        }
        return;
    }
    //Else it's a response
    assert(cachePort->transmitList.size());
    assert(cachePort->transmitList.front().first <= curTick);
    pkt = cachePort->transmitList.front().second;
    cachePort->transmitList.pop_front();
    // Reschedule for the next queued response, no earlier than next tick.
    if (!cachePort->transmitList.empty()) {
        Tick time = cachePort->transmitList.front().first;
        schedule(time <= curTick ? curTick+1 : time);
    }

    if (pkt->flags & NACKED_LINE)
        pkt->result = Packet::Nacked;
    else
        pkt->result = Packet::Success;
    pkt->makeTimingResponse();
    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
    // Preserve response ordering: if anything is already queued (or we
    // are waiting on a retry), this response must queue behind it.
    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
        //Already have a list, just append
        cachePort->drainList.push_back(pkt);
        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
    }
    else if (!cachePort->sendTiming(pkt)) {
        //It failed, save it to list of drain events
        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
        cachePort->drainList.push_back(pkt);
        cachePort->waitingOnRetry = true;
    }

    // Check if we're done draining once this list is empty
    if (cachePort->drainList.empty() && cachePort->transmitList.empty())
        cachePort->cache->checkDrain();
}
394
395 const char *
396 BaseCache::CacheEvent::description()
397 {
398 return "timing event\n";
399 }
400
401 Port*
402 BaseCache::getPort(const std::string &if_name, int idx)
403 {
404 if (if_name == "")
405 {
406 if(cpuSidePort == NULL) {
407 cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
408 sendEvent = new CacheEvent(cpuSidePort, true);
409 }
410 return cpuSidePort;
411 }
412 else if (if_name == "functional")
413 {
414 return new CachePort(name() + "-cpu_side_port", this, true);
415 }
416 else if (if_name == "cpu_side")
417 {
418 if(cpuSidePort == NULL) {
419 cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
420 sendEvent = new CacheEvent(cpuSidePort, true);
421 }
422 return cpuSidePort;
423 }
424 else if (if_name == "mem_side")
425 {
426 if (memSidePort != NULL)
427 panic("Already have a mem side for this cache\n");
428 memSidePort = new CachePort(name() + "-mem_side_port", this, false);
429 memSendEvent = new CacheEvent(memSidePort, true);
430 return memSidePort;
431 }
432 else panic("Port name %s unrecognized\n", if_name);
433 }
434
435 void
436 BaseCache::init()
437 {
438 if (!cpuSidePort || !memSidePort)
439 panic("Cache not hooked up on both sides\n");
440 cpuSidePort->sendStatusChange(Port::RangeChange);
441 }
442
/**
 * Register all statistics for this cache.
 *
 * Per-command vectors (hits, misses, latencies, rates) are indexed by
 * Packet::Command; a throwaway packet is built solely to turn command
 * indices into name strings.  The "demand" formulas cover reads+writes,
 * the "overall" formulas additionally fold in prefetch/writeback
 * commands.
 */
void
BaseCache::regStats()
{
    // Dummy request/packet used only for cmdIdxToString() below.
    Request temp_req((Addr) NULL, 4, 0);
    Packet::Command temp_cmd = Packet::ReadReq;
    Packet temp_pkt(&temp_req, temp_cmd, 0);  //@todo FIx command strings so this isn't neccessary
    temp_pkt.allocate(); //Temp allocate, all need data

    using namespace Stats;

    // Hit statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        hits[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_hits")
            .desc("number of " + cstr + " hits")
            .flags(total | nozero | nonan)
            ;
    }

    demandHits
        .name(name() + ".demand_hits")
        .desc("number of demand (read+write) hits")
        .flags(total)
        ;
    demandHits = hits[Packet::ReadReq] + hits[Packet::WriteReq];

    overallHits
        .name(name() + ".overall_hits")
        .desc("number of overall hits")
        .flags(total)
        ;
    overallHits = demandHits + hits[Packet::SoftPFReq] + hits[Packet::HardPFReq]
        + hits[Packet::Writeback];

    // Miss statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        misses[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_misses")
            .desc("number of " + cstr + " misses")
            .flags(total | nozero | nonan)
            ;
    }

    demandMisses
        .name(name() + ".demand_misses")
        .desc("number of demand (read+write) misses")
        .flags(total)
        ;
    demandMisses = misses[Packet::ReadReq] + misses[Packet::WriteReq];

    overallMisses
        .name(name() + ".overall_misses")
        .desc("number of overall misses")
        .flags(total)
        ;
    overallMisses = demandMisses + misses[Packet::SoftPFReq] +
        misses[Packet::HardPFReq] + misses[Packet::Writeback];

    // Miss latency statistics
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missLatency[access_idx]
            .init(maxThreadsPerCPU)
            .name(name() + "." + cstr + "_miss_latency")
            .desc("number of " + cstr + " miss cycles")
            .flags(total | nozero | nonan)
            ;
    }

    demandMissLatency
        .name(name() + ".demand_miss_latency")
        .desc("number of demand (read+write) miss cycles")
        .flags(total)
        ;
    demandMissLatency = missLatency[Packet::ReadReq] + missLatency[Packet::WriteReq];

    overallMissLatency
        .name(name() + ".overall_miss_latency")
        .desc("number of overall miss cycles")
        .flags(total)
        ;
    // NOTE: unlike overallMisses, this omits Writeback latency.
    overallMissLatency = demandMissLatency + missLatency[Packet::SoftPFReq] +
        missLatency[Packet::HardPFReq];

    // access formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        accesses[access_idx]
            .name(name() + "." + cstr + "_accesses")
            .desc("number of " + cstr + " accesses(hits+misses)")
            .flags(total | nozero | nonan)
            ;

        accesses[access_idx] = hits[access_idx] + misses[access_idx];
    }

    demandAccesses
        .name(name() + ".demand_accesses")
        .desc("number of demand (read+write) accesses")
        .flags(total)
        ;
    demandAccesses = demandHits + demandMisses;

    overallAccesses
        .name(name() + ".overall_accesses")
        .desc("number of overall (read+write) accesses")
        .flags(total)
        ;
    overallAccesses = overallHits + overallMisses;

    // miss rate formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        missRate[access_idx]
            .name(name() + "." + cstr + "_miss_rate")
            .desc("miss rate for " + cstr + " accesses")
            .flags(total | nozero | nonan)
            ;

        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
    }

    demandMissRate
        .name(name() + ".demand_miss_rate")
        .desc("miss rate for demand accesses")
        .flags(total)
        ;
    demandMissRate = demandMisses / demandAccesses;

    overallMissRate
        .name(name() + ".overall_miss_rate")
        .desc("miss rate for overall accesses")
        .flags(total)
        ;
    overallMissRate = overallMisses / overallAccesses;

    // miss latency formulas
    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
        Packet::Command cmd = (Packet::Command)access_idx;
        const string &cstr = temp_pkt.cmdIdxToString(cmd);

        avgMissLatency[access_idx]
            .name(name() + "." + cstr + "_avg_miss_latency")
            .desc("average " + cstr + " miss latency")
            .flags(total | nozero | nonan)
            ;

        avgMissLatency[access_idx] =
            missLatency[access_idx] / misses[access_idx];
    }

    demandAvgMissLatency
        .name(name() + ".demand_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    demandAvgMissLatency = demandMissLatency / demandMisses;

    overallAvgMissLatency
        .name(name() + ".overall_avg_miss_latency")
        .desc("average overall miss latency")
        .flags(total)
        ;
    overallAvgMissLatency = overallMissLatency / overallMisses;

    // Blocking statistics, split out by cause (no MSHRs / no targets).
    blocked_cycles.init(NUM_BLOCKED_CAUSES);
    blocked_cycles
        .name(name() + ".blocked_cycles")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;


    blocked_causes.init(NUM_BLOCKED_CAUSES);
    blocked_causes
        .name(name() + ".blocked")
        .desc("number of cycles access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked
        .name(name() + ".avg_blocked_cycles")
        .desc("average number of cycles each access was blocked")
        .subname(Blocked_NoMSHRs, "no_mshrs")
        .subname(Blocked_NoTargets, "no_targets")
        ;

    avg_blocked = blocked_cycles / blocked_causes;

    fastWrites
        .name(name() + ".fast_writes")
        .desc("number of fast writes performed")
        ;

    cacheCopies
        .name(name() + ".cache_copies")
        .desc("number of cache copies performed")
        ;

}
659
660 unsigned int
661 BaseCache::drain(Event *de)
662 {
663 // Set status
664 if (!canDrain()) {
665 drainEvent = de;
666
667 changeState(SimObject::Draining);
668 return 1;
669 }
670
671 changeState(SimObject::Drained);
672 return 0;
673 }