/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definition of BaseCache functions.
 */
#include "mem/cache/base_cache.hh"

#include "cpu/base.hh"
42 BaseCache::CachePort::CachePort(const std::string
&_name
, BaseCache
*_cache
,
44 : Port(_name
), cache(_cache
), isCpuSide(_isCpuSide
)
48 waitingOnRetry
= false;
49 //Start ports at null if more than one is created we should panic
55 BaseCache::CachePort::recvStatusChange(Port::Status status
)
57 cache
->recvStatusChange(status
, isCpuSide
);
61 BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList
&resp
,
64 cache
->getAddressRanges(resp
, snoop
, isCpuSide
);
68 BaseCache::CachePort::deviceBlockSize()
70 return cache
->getBlockSize();
74 BaseCache::CachePort::recvTiming(Packet
*pkt
)
77 && !pkt
->req
->isUncacheable()
78 && pkt
->isInvalidate()
79 && !pkt
->isRead() && !pkt
->isWrite()) {
80 //Upgrade or Invalidate
81 //Look into what happens if two slave caches on bus
82 DPRINTF(Cache
, "%s %x ? blk_addr: %x\n", pkt
->cmdString(),
83 pkt
->getAddr() & (((ULL(1))<<48)-1),
84 pkt
->getAddr() & ~((Addr
)cache
->blkSize
- 1));
86 assert(!(pkt
->flags
& SATISFIED
));
87 pkt
->flags
|= SATISFIED
;
88 //Invalidates/Upgrades need no response if they get the bus
92 if (pkt
->isRequest() && blocked
)
94 DPRINTF(Cache
,"Scheduling a retry while blocked\n");
98 return cache
->doTimingAccess(pkt
, this, isCpuSide
);
102 BaseCache::CachePort::recvAtomic(Packet
*pkt
)
104 return cache
->doAtomicAccess(pkt
, isCpuSide
);
108 BaseCache::CachePort::recvFunctional(Packet
*pkt
)
110 cache
->doFunctionalAccess(pkt
, isCpuSide
);
114 BaseCache::CachePort::recvRetry()
117 assert(waitingOnRetry
);
118 if (!drainList
.empty()) {
119 //We have some responses to drain first
120 if (sendTiming(drainList
.front())) {
121 drainList
.pop_front();
122 if (!drainList
.empty() ||
123 !isCpuSide
&& cache
->doMasterRequest() ||
124 isCpuSide
&& cache
->doSlaveRequest()) {
125 BaseCache::CacheEvent
* reqCpu
= new BaseCache::CacheEvent(this);
126 reqCpu
->schedule(curTick
+ 1);
128 waitingOnRetry
= false;
133 assert(cache
->doMasterRequest());
134 pkt
= cache
->getPacket();
135 MSHR
* mshr
= (MSHR
*)pkt
->senderState
;
136 bool success
= sendTiming(pkt
);
137 DPRINTF(Cache
, "Address %x was %s in sending the timing request\n",
138 pkt
->getAddr(), success
? "succesful" : "unsuccesful");
139 cache
->sendResult(pkt
, mshr
, success
);
140 waitingOnRetry
= !success
;
141 if (success
&& cache
->doMasterRequest())
143 //Still more to issue, rerequest in 1 cycle
145 BaseCache::CacheEvent
* reqCpu
= new BaseCache::CacheEvent(this);
146 reqCpu
->schedule(curTick
+ 1);
152 //pkt = cache->getCoherencePacket();
153 //We save the packet, no reordering on CSHRS
155 bool success
= sendTiming(pkt
);
156 waitingOnRetry
= !success
;
157 if (success
&& cache
->doSlaveRequest())
159 //Still more to issue, rerequest in 1 cycle
161 BaseCache::CacheEvent
* reqCpu
= new BaseCache::CacheEvent(this);
162 reqCpu
->schedule(curTick
+ 1);
169 BaseCache::CachePort::setBlocked()
172 DPRINTF(Cache
, "Cache Blocking\n");
174 //Clear the retry flag
175 mustSendRetry
= false;
179 BaseCache::CachePort::clearBlocked()
182 DPRINTF(Cache
, "Cache Unblocking\n");
186 DPRINTF(Cache
, "Cache Sending Retry\n");
187 mustSendRetry
= false;
192 BaseCache::CacheEvent::CacheEvent(CachePort
*_cachePort
)
193 : Event(&mainEventQueue
, CPU_Tick_Pri
), cachePort(_cachePort
)
195 this->setFlags(AutoDelete
);
199 BaseCache::CacheEvent::CacheEvent(CachePort
*_cachePort
, Packet
*_pkt
)
200 : Event(&mainEventQueue
, CPU_Tick_Pri
), cachePort(_cachePort
), pkt(_pkt
)
202 this->setFlags(AutoDelete
);
206 BaseCache::CacheEvent::process()
210 if (cachePort
->waitingOnRetry
) return;
211 //We have some responses to drain first
212 if (!cachePort
->drainList
.empty()) {
213 if (cachePort
->sendTiming(cachePort
->drainList
.front())) {
214 cachePort
->drainList
.pop_front();
215 if (!cachePort
->drainList
.empty() ||
216 !cachePort
->isCpuSide
&& cachePort
->cache
->doMasterRequest() ||
217 cachePort
->isCpuSide
&& cachePort
->cache
->doSlaveRequest())
218 this->schedule(curTick
+ 1);
220 else cachePort
->waitingOnRetry
= true;
222 else if (!cachePort
->isCpuSide
)
225 pkt
= cachePort
->cache
->getPacket();
226 MSHR
* mshr
= (MSHR
*) pkt
->senderState
;
227 bool success
= cachePort
->sendTiming(pkt
);
228 DPRINTF(Cache
, "Address %x was %s in sending the timing request\n",
229 pkt
->getAddr(), success
? "succesful" : "unsuccesful");
230 cachePort
->cache
->sendResult(pkt
, mshr
, success
);
231 cachePort
->waitingOnRetry
= !success
;
232 if (success
&& cachePort
->cache
->doMasterRequest())
234 //Still more to issue, rerequest in 1 cycle
236 this->schedule(curTick
+1);
242 pkt
= cachePort
->cache
->getCoherencePacket();
243 bool success
= cachePort
->sendTiming(pkt
);
245 //Need to send on a retry
246 cachePort
->cshrRetry
= pkt
;
247 cachePort
->waitingOnRetry
= true;
249 else if (cachePort
->cache
->doSlaveRequest())
251 //Still more to issue, rerequest in 1 cycle
253 this->schedule(curTick
+1);
259 //Know the packet to send
260 if (pkt
->flags
& NACKED_LINE
)
261 pkt
->result
= Packet::Nacked
;
263 pkt
->result
= Packet::Success
;
264 pkt
->makeTimingResponse();
265 if (!cachePort
->drainList
.empty()) {
266 //Already have a list, just append
267 cachePort
->drainList
.push_back(pkt
);
269 else if (!cachePort
->sendTiming(pkt
)) {
270 //It failed, save it to list of drain events
271 cachePort
->drainList
.push_back(pkt
);
272 cachePort
->waitingOnRetry
= true;
277 BaseCache::CacheEvent::description()
279 return "timing event\n";
283 BaseCache::getPort(const std::string
&if_name
, int idx
)
287 if(cpuSidePort
== NULL
)
288 cpuSidePort
= new CachePort(name() + "-cpu_side_port", this, true);
291 else if (if_name
== "functional")
293 if(cpuSidePort
== NULL
)
294 cpuSidePort
= new CachePort(name() + "-cpu_side_port", this, true);
297 else if (if_name
== "cpu_side")
299 if(cpuSidePort
== NULL
)
300 cpuSidePort
= new CachePort(name() + "-cpu_side_port", this, true);
303 else if (if_name
== "mem_side")
305 if (memSidePort
!= NULL
)
306 panic("Already have a mem side for this cache\n");
307 memSidePort
= new CachePort(name() + "-mem_side_port", this, false);
310 else panic("Port name %s unrecognized\n", if_name
);
316 if (!cpuSidePort
|| !memSidePort
)
317 panic("Cache not hooked up on both sides\n");
318 cpuSidePort
->sendStatusChange(Port::RangeChange
);
322 BaseCache::regStats()
324 Request
temp_req((Addr
) NULL
, 4, 0);
325 Packet::Command temp_cmd
= Packet::ReadReq
;
326 Packet
temp_pkt(&temp_req
, temp_cmd
, 0); //@todo FIx command strings so this isn't neccessary
327 temp_pkt
.allocate(); //Temp allocate, all need data
329 using namespace Stats
;
332 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
333 Packet::Command cmd
= (Packet::Command
)access_idx
;
334 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
337 .init(maxThreadsPerCPU
)
338 .name(name() + "." + cstr
+ "_hits")
339 .desc("number of " + cstr
+ " hits")
340 .flags(total
| nozero
| nonan
)
345 .name(name() + ".demand_hits")
346 .desc("number of demand (read+write) hits")
349 demandHits
= hits
[Packet::ReadReq
] + hits
[Packet::WriteReq
];
352 .name(name() + ".overall_hits")
353 .desc("number of overall hits")
356 overallHits
= demandHits
+ hits
[Packet::SoftPFReq
] + hits
[Packet::HardPFReq
]
357 + hits
[Packet::Writeback
];
360 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
361 Packet::Command cmd
= (Packet::Command
)access_idx
;
362 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
365 .init(maxThreadsPerCPU
)
366 .name(name() + "." + cstr
+ "_misses")
367 .desc("number of " + cstr
+ " misses")
368 .flags(total
| nozero
| nonan
)
373 .name(name() + ".demand_misses")
374 .desc("number of demand (read+write) misses")
377 demandMisses
= misses
[Packet::ReadReq
] + misses
[Packet::WriteReq
];
380 .name(name() + ".overall_misses")
381 .desc("number of overall misses")
384 overallMisses
= demandMisses
+ misses
[Packet::SoftPFReq
] +
385 misses
[Packet::HardPFReq
] + misses
[Packet::Writeback
];
387 // Miss latency statistics
388 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
389 Packet::Command cmd
= (Packet::Command
)access_idx
;
390 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
392 missLatency
[access_idx
]
393 .init(maxThreadsPerCPU
)
394 .name(name() + "." + cstr
+ "_miss_latency")
395 .desc("number of " + cstr
+ " miss cycles")
396 .flags(total
| nozero
| nonan
)
401 .name(name() + ".demand_miss_latency")
402 .desc("number of demand (read+write) miss cycles")
405 demandMissLatency
= missLatency
[Packet::ReadReq
] + missLatency
[Packet::WriteReq
];
408 .name(name() + ".overall_miss_latency")
409 .desc("number of overall miss cycles")
412 overallMissLatency
= demandMissLatency
+ missLatency
[Packet::SoftPFReq
] +
413 missLatency
[Packet::HardPFReq
];
416 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
417 Packet::Command cmd
= (Packet::Command
)access_idx
;
418 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
421 .name(name() + "." + cstr
+ "_accesses")
422 .desc("number of " + cstr
+ " accesses(hits+misses)")
423 .flags(total
| nozero
| nonan
)
426 accesses
[access_idx
] = hits
[access_idx
] + misses
[access_idx
];
430 .name(name() + ".demand_accesses")
431 .desc("number of demand (read+write) accesses")
434 demandAccesses
= demandHits
+ demandMisses
;
437 .name(name() + ".overall_accesses")
438 .desc("number of overall (read+write) accesses")
441 overallAccesses
= overallHits
+ overallMisses
;
443 // miss rate formulas
444 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
445 Packet::Command cmd
= (Packet::Command
)access_idx
;
446 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
449 .name(name() + "." + cstr
+ "_miss_rate")
450 .desc("miss rate for " + cstr
+ " accesses")
451 .flags(total
| nozero
| nonan
)
454 missRate
[access_idx
] = misses
[access_idx
] / accesses
[access_idx
];
458 .name(name() + ".demand_miss_rate")
459 .desc("miss rate for demand accesses")
462 demandMissRate
= demandMisses
/ demandAccesses
;
465 .name(name() + ".overall_miss_rate")
466 .desc("miss rate for overall accesses")
469 overallMissRate
= overallMisses
/ overallAccesses
;
471 // miss latency formulas
472 for (int access_idx
= 0; access_idx
< NUM_MEM_CMDS
; ++access_idx
) {
473 Packet::Command cmd
= (Packet::Command
)access_idx
;
474 const string
&cstr
= temp_pkt
.cmdIdxToString(cmd
);
476 avgMissLatency
[access_idx
]
477 .name(name() + "." + cstr
+ "_avg_miss_latency")
478 .desc("average " + cstr
+ " miss latency")
479 .flags(total
| nozero
| nonan
)
482 avgMissLatency
[access_idx
] =
483 missLatency
[access_idx
] / misses
[access_idx
];
487 .name(name() + ".demand_avg_miss_latency")
488 .desc("average overall miss latency")
491 demandAvgMissLatency
= demandMissLatency
/ demandMisses
;
493 overallAvgMissLatency
494 .name(name() + ".overall_avg_miss_latency")
495 .desc("average overall miss latency")
498 overallAvgMissLatency
= overallMissLatency
/ overallMisses
;
500 blocked_cycles
.init(NUM_BLOCKED_CAUSES
);
502 .name(name() + ".blocked_cycles")
503 .desc("number of cycles access was blocked")
504 .subname(Blocked_NoMSHRs
, "no_mshrs")
505 .subname(Blocked_NoTargets
, "no_targets")
509 blocked_causes
.init(NUM_BLOCKED_CAUSES
);
511 .name(name() + ".blocked")
512 .desc("number of cycles access was blocked")
513 .subname(Blocked_NoMSHRs
, "no_mshrs")
514 .subname(Blocked_NoTargets
, "no_targets")
518 .name(name() + ".avg_blocked_cycles")
519 .desc("average number of cycles each access was blocked")
520 .subname(Blocked_NoMSHRs
, "no_mshrs")
521 .subname(Blocked_NoTargets
, "no_targets")
524 avg_blocked
= blocked_cycles
/ blocked_causes
;
527 .name(name() + ".fast_writes")
528 .desc("number of fast writes performed")
532 .name(name() + ".cache_copies")
533 .desc("number of cache copies performed")