Merge zizzer:/z/m5/Bitkeeper/newmem
[gem5.git] / src / mem / cache / base_cache.hh
1 /*
2 * Copyright (c) 2003-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 */
30
31 /**
32 * @file
33 * Declares a basic cache interface BaseCache.
34 */
35
36 #ifndef __BASE_CACHE_HH__
37 #define __BASE_CACHE_HH__
38
39 #include <vector>
40 #include <string>
41 #include <list>
42 #include <inttypes.h>
43
44 #include "base/misc.hh"
45 #include "base/statistics.hh"
46 #include "base/trace.hh"
47 #include "mem/mem_object.hh"
48 #include "mem/packet.hh"
49 #include "mem/port.hh"
50 #include "mem/request.hh"
51 #include "sim/eventq.hh"
52
/**
 * Reasons for a cache to be blocked.  Each enumerator is used as a bit
 * position in the cache's `blocked`/`blockedSnoop` bit vectors
 * (see BaseCache::setBlocked, which computes `1 << cause`), so the
 * ordering here defines the flag layout.
 */
enum BlockedCause{
    Blocked_NoMSHRs,        // no free MSHR to track a new miss
    Blocked_NoTargets,      // an MSHR exists but has no free target slot
    Blocked_NoWBBuffers,    // write-back buffer is full
    Blocked_Coherence,      // blocked by a coherence-protocol action
    Blocked_Copy,           // blocked while servicing a copy
    NUM_BLOCKED_CAUSES      // count sentinel; not a real cause
};
64
/**
 * Reasons for a cache to request a bus.  Each enumerator is used as a
 * bit position in the cache's `masterRequests`/`slaveRequests` bit
 * vectors (see BaseCache::setMasterRequest, which computes `1 << cause`).
 */
enum RequestCause{
    Request_MSHR,       // an outstanding miss (MSHR) needs the bus
    Request_WB,         // a write-back needs the bus
    Request_Coherence,  // a coherence action needs the bus
    Request_PF          // a prefetch needs the bus
};
74
75 class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    /**
     * A port connecting this cache to the rest of the memory system.
     * The same class serves both the CPU-side and the memory-side
     * interface; isCpuSide selects which role a given instance plays.
     * All received packets are forwarded to the owning cache.
     */
    class CachePort : public Port
    {
      public:
        /** The cache that owns this port. */
        BaseCache *cache;

        /**
         * Create a port for the given cache.
         * @param _name The name of this port.
         * @param _cache The cache that owns the port.
         * @param _isCpuSide True if this port faces the CPU.
         */
        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);

      protected:
        /** Handle a timing-mode packet arriving from the peer port. */
        virtual bool recvTiming(Packet *pkt);

        /** Handle an atomic-mode packet; returns the access latency. */
        virtual Tick recvAtomic(Packet *pkt);

        /** Handle a functional (debug) access from the peer port. */
        virtual void recvFunctional(Packet *pkt);

        /** React to a status change (e.g. a range change) from the peer. */
        virtual void recvStatusChange(Status status);

        /** Report the address ranges (and snoop ranges) this port serves. */
        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop);

        /** Return the block size of the owning cache. */
        virtual int deviceBlockSize();

        /** Called by the peer when it can accept a previously refused packet. */
        virtual void recvRetry();

      public:
        /** Mark this port as blocked so new packets are refused. */
        void setBlocked();

        /** Unblock this port; a pending retry may then be issued. */
        void clearBlocked();

        /** True while this port refuses new packets. */
        bool blocked;

        /** True if a retry must be sent once the port is unblocked. */
        bool mustSendRetry;

        /** True if this is the CPU-facing port of the cache. */
        bool isCpuSide;

        /** Packets waiting to be sent once the peer can accept them. */
        std::list<Packet *> drainList;

        /** Coherence (CSHR) packet that must be retried, if any. */
        Packet *cshrRetry;
    };

    /**
     * Event used to perform a deferred port action at a future tick:
     * either sending a specific packet (pkt set) or requesting the bus
     * (pkt NULL) — see the two constructors.
     */
    struct CacheEvent : public Event
    {
        /** The port this event acts on. */
        CachePort *cachePort;
        /** The packet to send, or NULL for a bus-request event. */
        Packet *pkt;

        /** Construct a bus-request event for the given port. */
        CacheEvent(CachePort *_cachePort);
        /** Construct a packet-send event for the given port. */
        CacheEvent(CachePort *_cachePort, Packet *_pkt);
        /** Perform the deferred action. */
        void process();
        /** Return a description of this event for tracing. */
        const char *description();
    };

  protected:
    /** The port facing the CPU. */
    CachePort *cpuSidePort;
    /** The port facing memory. */
    CachePort *memSidePort;

    /** True once the snoop ranges have been sent to the memory side. */
    bool snoopRangesSent;

  public:
    /** Return the named port of this cache; see MemObject::getPort. */
    virtual Port *getPort(const std::string &if_name, int idx = -1);

  private:
    //To be defined in cache_impl.hh not in base class

    /** Handle a timing-mode access; overridden by the concrete cache. */
    virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
    {
        fatal("No implementation");
    }

    /** Handle an atomic-mode access; overridden by the concrete cache. */
    virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    /** Handle a functional access; overridden by the concrete cache. */
    virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    /**
     * Forward a range change from one side of the cache to the other.
     * A change arriving on the memory side is propagated to the CPU side,
     * and the first time this happens the snoop ranges are also pushed
     * back out the memory side (guarded by snoopRangesSent so it is done
     * only once).  A change on the CPU side goes to the memory side.
     */
    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange){
            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
                if (!snoopRangesSent) {
                    snoopRangesSent = true;
                    memSidePort->sendStatusChange(Port::RangeChange);
                }
            }
            else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

    /** Return the next packet to send on the bus; overridden by the concrete cache. */
    virtual Packet *getPacket()
    {
        fatal("No implementation");
    }

    /** Return the next coherence packet to send; overridden by the concrete cache. */
    virtual Packet *getCoherencePacket()
    {
        fatal("No implementation");
    }

    /** Notify the cache of the result of sending a packet; overridden by the concrete cache. */
    virtual void sendResult(Packet* &pkt, MSHR* mshr, bool success)
    {

        fatal("No implementation");
    }

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector for the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector for the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector for the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;

  protected:

    /** True if this cache is connected to the CPU. */
    bool topLevelCache;


    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

    /** Block size of this cache */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

    /** Parameter object common to all BaseCache instances. */
    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
              maxMisses(max_misses)
        {
        }
    };

    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), topLevelCache(false), blkSize(params.blkSize),
          missCount(params.maxMisses)
    {
        //Start ports at null if more than one is created we should panic
        cpuSidePort = NULL;
        memSidePort = NULL;
        snoopRangesSent = false;
    }

    /** Finish wiring up the ports; see MemObject::init. */
    virtual void init();

    /**
     * Query block size of a cache.
     * @return The block size
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if this cache is connected to the CPU.
     * @return True if this is a L1 cache.
     */
    bool isTopLevel()
    {
        return topLevelCache;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            // first blocking cause: start the blocked-cycle counter
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        int old_state = blocked;
        if (!(blocked & flag)) {
            //Wasn't already blocked for this cause
            blocked |= flag;
            DPRINTF(Cache,"Blocking for cause %s\n", cause);
            if (!old_state)
                // transition from unblocked to blocked: tell the CPU-side port
                cpuSidePort->setBlocked();
        }
    }

    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        uint8_t old_state = blockedSnoop;
        if (!(blockedSnoop & flag)) {
            //Wasn't already blocked for this cause
            blockedSnoop |= flag;
            if (!old_state)
                // transition from unblocked to blocked: tell the memory-side port
                memSidePort->setBlocked();
        }
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag)
        {
            blocked &= ~flag;
            if (!isBlocked()) {
                // last access-path cause cleared: account blocked time
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache,"Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag)
        {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }

    /**
     * True if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest())
        {
            // no request pending yet: schedule a bus-request event
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1<<cause;
        masterRequests |= flag;
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        masterRequests &= ~flag;
    }

    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     * @warning Not yet implemented; asserts if called.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        uint8_t flag = 1<<cause;
        slaveRequests |= flag;
        assert("Implement\n" && 0);
//	si->pktuest(time);
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        slaveRequests &= ~flag;
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(Packet *pkt, Tick time)
    {
        if (pkt->needsResponse()) {
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
        }
    }

    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(Packet *pkt, Tick time)
    {
        if (!pkt->req->isUncacheable()) {
            // record latency from request issue to response readiness
            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
        }
        if (pkt->needsResponse()) {
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
        }
    }

    /**
     * Supplies the data if cache to cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     * @param time The time the response is ready.
     */
    void respondToSnoop(Packet *pkt, Tick time)
    {
//        assert("Implement\n" && 0);
//	mi->respond(pkt,curTick + hitLatency);
        assert (pkt->needsResponse());
        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
        reqMem->schedule(time);
    }

    /**
     * Notification from master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

    /**
     * Collect the address ranges this cache responds to, for the side
     * asking.  The CPU side reports the ranges the memory side can reach;
     * the memory side reports the snoop ranges seen from the CPU side.
     */
    void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
    {
        if (isCpuSide)
        {
            AddrRangeList dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        }
        else
        {
            //This is where snoops get updated
            AddrRangeList dummy;
//            if (!topLevelCache)
//            {
                cpuSidePort->getPeerAddressRanges(dummy, snoop);
//            }
//            else
//            {
//                snoop.push_back(RangeSize(0,-1));
//            }

            return;
        }
    }
};
591
592 #endif //__BASE_CACHE_HH__