src/mem/cache/base_cache.hh
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"

/**
 * Reasons for Caches to be Blocked.
 */
enum BlockedCause {
    Blocked_NoMSHRs,
    Blocked_NoTargets,
    Blocked_NoWBBuffers,
    Blocked_Coherence,
    Blocked_Copy,
    NUM_BLOCKED_CAUSES
};

/**
 * Reasons for cache to request a bus.
 */
enum RequestCause {
    Request_MSHR,
    Request_WB,
    Request_Coherence,
    Request_PF
};

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    class CachePort : public Port
    {
      public:
        BaseCache *cache;

        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);

      protected:
        virtual bool recvTiming(Packet *pkt);

        virtual Tick recvAtomic(Packet *pkt);

        virtual void recvFunctional(Packet *pkt);

        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop);

        virtual int deviceBlockSize();

        virtual void recvRetry();

      public:
        void setBlocked();

        void clearBlocked();

        bool blocked;

        bool mustSendRetry;

        bool isCpuSide;
    };

    struct CacheEvent : public Event
    {
        CachePort *cachePort;
        Packet *pkt;

        CacheEvent(CachePort *_cachePort);
        CacheEvent(CachePort *_cachePort, Packet *_pkt);
        void process();
        const char *description();
    };
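
    // Note on how CacheEvent is used below: an event constructed without a
    // packet is scheduled to make its port request the bus (see
    // setMasterRequest()), while an event constructed with a packet delivers
    // that packet as a timing response when it fires (see respond() and
    // respondToMiss()); process() itself is defined out of line.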

  protected:
    CachePort *cpuSidePort;
    CachePort *memSidePort;

  public:
    virtual Port *getPort(const std::string &if_name, int idx = -1);

  private:
    // To be defined in cache_impl.hh, not in the base class.
    virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort,
                                bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange)
        {
            if (!isCpuSide)
            {
                cpuSidePort->sendStatusChange(Port::RangeChange);
            }
            else
            {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

    virtual Packet *getPacket()
    {
        fatal("No implementation");
    }

    virtual Packet *getCoherencePacket()
    {
        fatal("No implementation");
    }

    virtual void sendResult(Packet* &pkt, bool success)
    {
        fatal("No implementation");
    }

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector for the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector for the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector for the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;
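
    // These request and blocking vectors are indexed by cause: bit
    // (1 << cause) is set while that cause is outstanding. For example,
    // setBlocked(Blocked_NoMSHRs) sets bit 0 of blocked, and the access path
    // stays blocked until clearBlocked() removes the last set bit (see
    // setBlocked() and clearBlocked() below).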

  protected:

    /** True if this cache is connected to the CPU. */
    bool topLevelCache;

    /** Stores the tick when the cache blocked, for statistics. */
    Tick blockedCycle;

    /** Block size of this cache. */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency),
              blkSize(_blkSize), maxMisses(max_misses)
        {
        }
    };
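
    // A minimal construction sketch (the values below are hypothetical,
    // chosen only for illustration): one address range, a 1-cycle hit
    // latency, a 64-byte block size, and a max_misses count of 0.
    //
    //     // given: std::vector<Range<Addr> > ranges covering this cache
    //     BaseCache::Params params(ranges, 1 /* hit_latency */,
    //                              64 /* blkSize */, 0 /* max_misses */);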

    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), topLevelCache(false), blkSize(params.blkSize),
          missCount(params.maxMisses)
    {
        // Start both ports at NULL; if more than one of either is created,
        // we should panic.
        cpuSidePort = NULL;
        memSidePort = NULL;
    }

    virtual void init();

    /**
     * Query block size of a cache.
     * @return The block size
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if this cache is connected to the CPU.
     * @return True if this is an L1 cache.
     */
    bool isTopLevel()
    {
        return topLevelCache;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        blocked |= flag;
        DPRINTF(Cache, "Blocking for cause %s\n", cause);
        cpuSidePort->setBlocked();
    }

    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        blockedSnoop |= flag;
        memSidePort->setBlocked();
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache, "Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag)
        {
            blocked &= ~flag;
            if (!isBlocked()) {
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache, "Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag)
        {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }

    /**
     * True if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest())
        {
            BaseCache::CacheEvent *reqCpu =
                new BaseCache::CacheEvent(memSidePort);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1 << cause;
        masterRequests |= flag;
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1 << cause;
        masterRequests &= ~flag;
    }

    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        uint8_t flag = 1 << cause;
        slaveRequests |= flag;
        assert("Implement\n" && 0);
        // si->request(time);
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1 << cause;
        slaveRequests &= ~flag;
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(Packet *pkt, Tick time)
    {
        pkt->makeTimingResponse();
        pkt->result = Packet::Success;
        CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
        reqCpu->schedule(time);
    }

    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(Packet *pkt, Tick time)
    {
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][pkt->req->getThreadNum()] +=
                time - pkt->time;
        }
        pkt->makeTimingResponse();
        pkt->result = Packet::Success;
        CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
        reqCpu->schedule(time);
    }

    /**
     * Supplies the data if cache-to-cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     */
    void respondToSnoop(Packet *pkt)
    {
        assert("Implement\n" && 0);
        // mi->respond(pkt, curTick + hitLatency);
    }

    /**
     * Notification from the master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

    void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop,
                          bool isCpuSide)
    {
        if (isCpuSide)
        {
            AddrRangeList dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        }
        else
        {
            // This is where snoops get updated
            return;
        }
    }
};

#endif //__BASE_CACHE_HH__