2 * Copyright (c) 2003-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Erik Hallnor
33 * Definition of BaseCache functions.
36 #include "mem/cache/base_cache.hh"
38 #include "cpu/base.hh"
// Constructor: bind this port to its owning cache and record which side
// of the cache it attaches to.
// NOTE(review): this excerpt is garbled -- each original source line is
// split across several physical lines and some lines are missing; the
// `bool _isCpuSide` parameter used by the initializer list below has lost
// its declaration line.  Confirm against the full source.
42 BaseCache::CachePort::CachePort(const std::string
&_name
, BaseCache
*_cache
,
// Forward the name to the base Port, stash the back-pointer to the cache,
// and remember the side flag so later callbacks dispatch correctly.
44 : Port(_name
), cache(_cache
), isCpuSide(_isCpuSide
)
47 //Start ports at null if more than one is created we should panic
// Peer status-change callback: forward the notification to the owning
// cache, tagged with which side (CPU or memory) this port sits on.
// (The return-type line is missing from this garbled excerpt.)
53 BaseCache::CachePort::recvStatusChange(Port::Status status
)
55 cache
->recvStatusChange(status
, isCpuSide
);
// Report the address ranges this port responds to by delegating to the
// owning cache.  NOTE(review): the declaration line for the second
// parameter is missing here; the call below passes a `snoop` argument,
// presumably an AddrRangeList& like `resp` -- confirm in full source.
59 BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList
&resp
,
62 cache
->getAddressRanges(resp
, snoop
, isCpuSide
);
// Block size exposed to the peer device: simply the owning cache's
// block size.  (Return-type line missing from this excerpt.)
66 BaseCache::CachePort::deviceBlockSize()
68 return cache
->getBlockSize();
// Timing-mode packet receipt.  If a request arrives while the cache is
// blocked, it is logged -- the missing lines here presumably flag a
// pending retry and reject the packet (TODO confirm against the full
// source).  Otherwise the packet is handed to the cache's timing path.
72 BaseCache::CachePort::recvTiming(Packet
*pkt
)
74 if (pkt
->isRequest() && blocked
)
76 DPRINTF(Cache
,"Scheduling a retry while blocked\n");
// Normal path: let the cache service the access and return its verdict.
80 return cache
->doTimingAccess(pkt
, this, isCpuSide
);
// Atomic-mode access: delegate directly to the cache and return its
// result (presumably the access latency -- return-type line missing).
84 BaseCache::CachePort::recvAtomic(Packet
*pkt
)
86 return cache
->doAtomicAccess(pkt
, isCpuSide
);
// Functional (debug, no-timing) access: delegate to the cache; nothing
// is returned to the peer.
90 BaseCache::CachePort::recvFunctional(Packet
*pkt
)
92 cache
->doFunctionalAccess(pkt
, isCpuSide
);
// Bus retry callback: the peer can now accept a packet we previously
// failed to send.  Three paths are visible in this excerpt, apparently in
// priority order: (1) drain queued responses, (2) re-issue an outstanding
// miss (MSHR) request, (3) re-issue a saved coherence (CSHR) packet.
// NOTE(review): several control-flow lines (braces, else branches, early
// returns, and the declarations of `result`/`pkt`) are missing from this
// garbled text, so the exact branching cannot be confirmed here.
96 BaseCache::CachePort::recvRetry()
99 if (!drainList
.empty()) {
100 //We have some responses to drain first
// Keep sending queued responses until one is refused or the list empties.
102 while (result
&& !drainList
.empty()) {
103 result
= sendTiming(drainList
.front());
105 drainList
.pop_front();
// Miss path: fetch the next request from the cache.  Its MSHR rides in
// senderState so the cache can be told whether the send succeeded.
111 pkt
= cache
->getPacket();
112 MSHR
* mshr
= (MSHR
*)pkt
->senderState
;
113 bool success
= sendTiming(pkt
);
114 DPRINTF(Cache
, "Address %x was %s in sending the timing request\n",
115 pkt
->getAddr(), success
? "succesful" : "unsuccesful");
116 cache
->sendResult(pkt
, mshr
, success
);
117 if (success
&& cache
->doMasterRequest())
119 //Still more to issue, rerequest in 1 cycle
// Self-schedule a CacheEvent next tick to push the next request out.
121 BaseCache::CacheEvent
* reqCpu
= new BaseCache::CacheEvent(this);
122 reqCpu
->schedule(curTick
+ 1);
// Coherence path: the packet was saved earlier (no reordering on CSHRs),
// so it is re-sent rather than refetched from the cache.
127 //pkt = cache->getCoherencePacket();
128 //We save the packet, no reordering on CSHRS
130 bool success
= sendTiming(pkt
);
131 if (success
&& cache
->doSlaveRequest())
133 //Still more to issue, rerequest in 1 cycle
135 BaseCache::CacheEvent
* reqCpu
= new BaseCache::CacheEvent(this);
136 reqCpu
->schedule(curTick
+ 1);
// Mark this port blocked: log it and clear the pending-retry flag so no
// retry is sent while the cache cannot accept requests.  (Lines setting
// the blocked state itself are missing from this garbled excerpt.)
143 BaseCache::CachePort::setBlocked()
146 DPRINTF(Cache
, "Cache Blocking\n");
148 //Clear the retry flag
149 mustSendRetry
= false;
// Unblock the port.  Per the "Sending Retry" DPRINTF below, if a retry
// was promised while blocked it is sent here and the flag cleared -- the
// sendRetry() call itself is on a line missing from this excerpt (TODO
// confirm against the full source).
153 BaseCache::CachePort::clearBlocked()
156 DPRINTF(Cache
, "Cache Unblocking\n");
160 DPRINTF(Cache
, "Cache Sending Retry\n");
161 mustSendRetry
= false;
// Request-issue event: carries no packet; process() will fetch one from
// the cache.  Scheduled on the main event queue at CPU-tick priority and
// auto-deleted after it fires.
166 BaseCache::CacheEvent::CacheEvent(CachePort
*_cachePort
)
167 : Event(&mainEventQueue
, CPU_Tick_Pri
), cachePort(_cachePort
)
169 this->setFlags(AutoDelete
);
// Response event: carries the packet to be sent back on the given port
// when the event fires.  Same queue/priority/auto-delete behavior as the
// packet-less constructor above.
173 BaseCache::CacheEvent::CacheEvent(CachePort
*_cachePort
, Packet
*_pkt
)
174 : Event(&mainEventQueue
, CPU_Tick_Pri
), cachePort(_cachePort
), pkt(_pkt
)
176 this->setFlags(AutoDelete
);
// Fire the event.  Two roles are discernible in this garbled excerpt
// (the if/else skeleton separating them is partly missing -- confirm):
//  - no packet attached: issue the next request.  On the memory side an
//    MSHR request is sent (rescheduling in 1 cycle if more remain); on
//    the CPU side a coherence packet is sent, saved in cshrRetry if the
//    send fails.
//  - packet attached: convert it to a timing response (honoring the
//    NACKED_LINE flag) and send it, queueing on the port's drainList if
//    the bus refuses or a drain is already pending.
180 BaseCache::CacheEvent::process()
184 if (!cachePort
->isCpuSide
)
// Memory-side request path: pull the next miss request; its MSHR travels
// in senderState for the sendResult() callback below.
187 pkt
= cachePort
->cache
->getPacket();
188 MSHR
* mshr
= (MSHR
*) pkt
->senderState
;
189 bool success
= cachePort
->sendTiming(pkt
);
190 DPRINTF(Cache
, "Address %x was %s in sending the timing request\n",
191 pkt
->getAddr(), success
? "succesful" : "unsuccesful");
192 cachePort
->cache
->sendResult(pkt
, mshr
, success
);
193 if (success
&& cachePort
->cache
->doMasterRequest())
195 //Still more to issue, rerequest in 1 cycle
// NOTE(review): rescheduling an AutoDelete event looks suspicious; the
// surrounding (missing) lines may reset flags -- confirm in full source.
197 this->schedule(curTick
+1);
// CPU-side coherence path: push a coherence packet upward.
203 pkt
= cachePort
->cache
->getCoherencePacket();
204 bool success
= cachePort
->sendTiming(pkt
);
206 //Need to send on a retry
// Failed send: remember the packet so recvRetry() can re-issue it.
207 cachePort
->cshrRetry
= pkt
;
209 else if (cachePort
->cache
->doSlaveRequest())
211 //Still more to issue, rerequest in 1 cycle
213 this->schedule(curTick
+1);
// Response path: the event carries the packet to send back.
219 //Know the packet to send
220 if (pkt
->flags
& NACKED_LINE
)
221 pkt
->result
= Packet::Nacked
;
// Otherwise mark success before converting to a timing response.
223 pkt
->result
= Packet::Success
;
224 pkt
->makeTimingResponse();
225 if (!cachePort
->drainList
.empty()) {
226 //Already blocked waiting for bus, just append
227 cachePort
->drainList
.push_back(pkt
);
229 else if (!cachePort
->sendTiming(pkt
)) {
230 //It failed, save it to list of drain events
231 cachePort
->drainList
.push_back(pkt
);
// Human-readable event description used by the event-queue tracing code.
236 BaseCache::CacheEvent::description()
238 return "timing event\n";
// Return (creating on first use) the port with the given name.  The
// CPU-side port is shared by the default "" name, "functional", and
// "cpu_side"; "mem_side" gets its own port and may only be created once.
// Any other name is a fatal configuration error.  (`idx` is unused in
// the visible code.)
242 BaseCache::getPort(const std::string
&if_name
, int idx
)
// Default interface: lazily create the CPU-side port.
246 if(cpuSidePort
== NULL
)
247 cpuSidePort
= new CachePort(name() + "-cpu_side_port", this, true);
250 else if (if_name
== "functional")
252 if(cpuSidePort
== NULL
)
253 cpuSidePort
= new CachePort(name() + "-cpu_side_port", this, true);
256 else if (if_name
== "cpu_side")
258 if(cpuSidePort
== NULL
)
259 cpuSidePort
= new CachePort(name() + "-cpu_side_port", this, true);
262 else if (if_name
== "mem_side")
264 if (memSidePort
!= NULL
)
265 panic("Already have a mem side for this cache\n");
266 memSidePort
= new CachePort(name() + "-mem_side_port", this, false);
269 else panic("Port name %s unrecognized\n", if_name
);
// Sanity check both ports exist, then announce address ranges upward.
// NOTE(review): original lines 270-274 are missing from this excerpt;
// these trailing statements may belong to a separate init() function
// rather than getPort() -- confirm against the full source.
275 if (!cpuSidePort
|| !memSidePort
)
276 panic("Cache not hooked up on both sides\n");
277 cpuSidePort
->sendStatusChange(Port::RangeChange
);
// Register all statistics for this cache with the Stats package.
// NOTE(review): this excerpt is heavily garbled -- the statistic objects
// being configured (hits, misses, missLatency, accesses, missRate,
// avgMissLatency, blocked-cycle stats, fastWrites, cacheCopies) are
// member Stats vectors/formulas; several lines that name them, and all
// braces, are missing from this view.
281 BaseCache::regStats()
// Build a throwaway request/packet purely so cmdIdxToString() can map
// each command index to its display name for the stat names below.
283 Request
temp_req((Addr
) NULL
, 4, 0);
284 Packet::Command temp_cmd
= Packet::ReadReq
;
285 Packet
temp_pkt(&temp_req
, temp_cmd
, 0); //@todo FIx command strings so this isn't neccessary
286 temp_pkt
.allocate(); //Temp allocate, all need data
288 using namespace Stats
;
// Per-command hit counters: one entry per thread, "<cache>.<cmd>_hits".
291 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
292 Packet::Command cmd
= (Packet::Command
)access_idx
;
293 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
296 .init(maxThreadsPerCPU
)
297 .name(name() + "." + cstr
+ "_hits")
298 .desc("number of " + cstr
+ " hits")
299 .flags(total
| nozero
| nonan
)
// Demand (read+write) and overall hit formulas derived from the vector.
304 .name(name() + ".demand_hits")
305 .desc("number of demand (read+write) hits")
308 demandHits
= hits
[Packet::ReadReq
] + hits
[Packet::WriteReq
];
311 .name(name() + ".overall_hits")
312 .desc("number of overall hits")
315 overallHits
= demandHits
+ hits
[Packet::SoftPFReq
] + hits
[Packet::HardPFReq
]
316 + hits
[Packet::Writeback
];
// Per-command miss counters, mirroring the hit counters above.
319 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
320 Packet::Command cmd
= (Packet::Command
)access_idx
;
321 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
324 .init(maxThreadsPerCPU
)
325 .name(name() + "." + cstr
+ "_misses")
326 .desc("number of " + cstr
+ " misses")
327 .flags(total
| nozero
| nonan
)
332 .name(name() + ".demand_misses")
333 .desc("number of demand (read+write) misses")
336 demandMisses
= misses
[Packet::ReadReq
] + misses
[Packet::WriteReq
];
339 .name(name() + ".overall_misses")
340 .desc("number of overall misses")
343 overallMisses
= demandMisses
+ misses
[Packet::SoftPFReq
] +
344 misses
[Packet::HardPFReq
] + misses
[Packet::Writeback
];
346 // Miss latency statistics
347 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
348 Packet::Command cmd
= (Packet::Command
)access_idx
;
349 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
351 missLatency
[access_idx
]
352 .init(maxThreadsPerCPU
)
353 .name(name() + "." + cstr
+ "_miss_latency")
354 .desc("number of " + cstr
+ " miss cycles")
355 .flags(total
| nozero
| nonan
)
360 .name(name() + ".demand_miss_latency")
361 .desc("number of demand (read+write) miss cycles")
364 demandMissLatency
= missLatency
[Packet::ReadReq
] + missLatency
[Packet::WriteReq
];
367 .name(name() + ".overall_miss_latency")
368 .desc("number of overall miss cycles")
// Note: unlike overallMisses above, Writeback latency is not included.
371 overallMissLatency
= demandMissLatency
+ missLatency
[Packet::SoftPFReq
] +
372 missLatency
[Packet::HardPFReq
];
// Per-command access counts: formula accesses[i] = hits[i] + misses[i].
375 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
376 Packet::Command cmd
= (Packet::Command
)access_idx
;
377 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
380 .name(name() + "." + cstr
+ "_accesses")
381 .desc("number of " + cstr
+ " accesses(hits+misses)")
382 .flags(total
| nozero
| nonan
)
385 accesses
[access_idx
] = hits
[access_idx
] + misses
[access_idx
];
389 .name(name() + ".demand_accesses")
390 .desc("number of demand (read+write) accesses")
393 demandAccesses
= demandHits
+ demandMisses
;
396 .name(name() + ".overall_accesses")
397 .desc("number of overall (read+write) accesses")
400 overallAccesses
= overallHits
+ overallMisses
;
402 // miss rate formulas
403 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
404 Packet::Command cmd
= (Packet::Command
)access_idx
;
405 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
408 .name(name() + "." + cstr
+ "_miss_rate")
409 .desc("miss rate for " + cstr
+ " accesses")
410 .flags(total
| nozero
| nonan
)
413 missRate
[access_idx
] = misses
[access_idx
] / accesses
[access_idx
];
417 .name(name() + ".demand_miss_rate")
418 .desc("miss rate for demand accesses")
421 demandMissRate
= demandMisses
/ demandAccesses
;
424 .name(name() + ".overall_miss_rate")
425 .desc("miss rate for overall accesses")
428 overallMissRate
= overallMisses
/ overallAccesses
;
430 // miss latency formulas
431 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
432 Packet::Command cmd
= (Packet::Command
)access_idx
;
433 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
435 avgMissLatency
[access_idx
]
436 .name(name() + "." + cstr
+ "_avg_miss_latency")
437 .desc("average " + cstr
+ " miss latency")
438 .flags(total
| nozero
| nonan
)
441 avgMissLatency
[access_idx
] =
442 missLatency
[access_idx
] / misses
[access_idx
];
// Demand/overall average-latency formulas.  NOTE(review): the demand
// stat's .desc below says "overall" -- likely a copy/paste slip in the
// original; a doc-only edit cannot change the runtime string.
446 .name(name() + ".demand_avg_miss_latency")
447 .desc("average overall miss latency")
450 demandAvgMissLatency
= demandMissLatency
/ demandMisses
;
452 overallAvgMissLatency
453 .name(name() + ".overall_avg_miss_latency")
454 .desc("average overall miss latency")
457 overallAvgMissLatency
= overallMissLatency
/ overallMisses
;
// Blocking statistics: cycles spent blocked and number of blocking
// events, broken out by cause (no MSHRs / no targets), plus their ratio.
459 blocked_cycles
.init(NUM_BLOCKED_CAUSES
);
461 .name(name() + ".blocked_cycles")
462 .desc("number of cycles access was blocked")
463 .subname(Blocked_NoMSHRs
, "no_mshrs")
464 .subname(Blocked_NoTargets
, "no_targets")
468 blocked_causes
.init(NUM_BLOCKED_CAUSES
);
470 .name(name() + ".blocked")
471 .desc("number of cycles access was blocked")
472 .subname(Blocked_NoMSHRs
, "no_mshrs")
473 .subname(Blocked_NoTargets
, "no_targets")
477 .name(name() + ".avg_blocked_cycles")
478 .desc("average number of cycles each access was blocked")
479 .subname(Blocked_NoMSHRs
, "no_mshrs")
480 .subname(Blocked_NoTargets
, "no_targets")
483 avg_blocked
= blocked_cycles
/ blocked_causes
;
// Miscellaneous counters: fast writes and cache-to-cache copies.
486 .name(name() + ".fast_writes")
487 .desc("number of fast writes performed")
491 .name(name() + ".cache_copies")
492 .desc("number of cache copies performed")