/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"

/**
 * Reasons for Caches to be Blocked.
 */
enum BlockedCause{
    Blocked_NoMSHRs,
    Blocked_NoTargets,
    Blocked_NoWBBuffers,
    Blocked_Coherence,
    Blocked_Copy,
    NUM_BLOCKED_CAUSES
};
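
// Note: each BlockedCause is used as a bit position in the cache's
// blocked/blockedSnoop bit vectors (see setBlocked()/clearBlocked() below),
// so a cache can be blocked for several independent reasons at once.
// Illustrative sketch only, not part of this interface:
//
//   uint8_t blocked = 0;
//   blocked |= (1 << Blocked_NoMSHRs);      // MSHR pool exhausted
//   blocked |= (1 << Blocked_NoWBBuffers);  // writeback buffers full
//   blocked &= ~(1 << Blocked_NoMSHRs);     // MSHR freed; still blocked on WBs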

/**
 * Reasons for cache to request a bus.
 */
enum RequestCause{
    Request_MSHR,
    Request_WB,
    Request_Coherence,
    Request_PF
};

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    class CachePort : public Port
    {
      public:
        BaseCache *cache;

        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);

      protected:
        virtual bool recvTiming(Packet *pkt);

        virtual Tick recvAtomic(Packet *pkt);

        virtual void recvFunctional(Packet *pkt);

        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop);

        virtual int deviceBlockSize();

        virtual void recvRetry();

      public:
        void setBlocked();

        void clearBlocked();

        bool blocked;

        bool mustSendRetry;

        bool isCpuSide;

        bool waitingOnRetry;

        std::list<Packet *> drainList;

    };

    struct CacheEvent : public Event
    {
        CachePort *cachePort;
        Packet *pkt;

        CacheEvent(CachePort *_cachePort);
        CacheEvent(CachePort *_cachePort, Packet *_pkt);
        void process();
        const char *description();
    };

  protected:
    CachePort *cpuSidePort;
    CachePort *memSidePort;

    bool snoopRangesSent;

  public:
    virtual Port *getPort(const std::string &if_name, int idx = -1);

  private:
    //To be defined in cache_impl.hh not in base class
    virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange){
            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
                if (!snoopRangesSent) {
                    snoopRangesSent = true;
                    memSidePort->sendStatusChange(Port::RangeChange);
                }
            }
            else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

    virtual Packet *getPacket()
    {
        fatal("No implementation");
    }

    virtual Packet *getCoherencePacket()
    {
        fatal("No implementation");
    }

    virtual void sendResult(Packet* &pkt, MSHR* mshr, bool success)
    {

        fatal("No implementation");
    }

    virtual void sendCoherenceResult(Packet* &pkt, MSHR* mshr, bool success)
    {

        fatal("No implementation");
    }

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector for the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector for the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector for the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;

  protected:

    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

    /** Block size of this cache */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
              maxMisses(max_misses)
        {
        }
    };
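
    // Illustrative sketch only: how configuration values might be packed into
    // a Params object before constructing a concrete cache. The specific
    // numbers are assumptions, not defaults defined by this interface:
    //
    //   std::vector<Range<Addr> > ranges;  // filled in by the config system
    //   BaseCache::Params params(ranges,
    //                            3,    // hit latency in cycles (assumed value)
    //                            64,   // block size in bytes (assumed value)
    //                            0);   // maxMisses; 0 is assumed to mean "no miss-count exit"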

    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), blkSize(params.blkSize),
          missCount(params.maxMisses)
    {
        // Start ports at null; if more than one is created we should panic
        cpuSidePort = NULL;
        memSidePort = NULL;
        snoopRangesSent = false;
    }

    virtual void init();

    /**
     * Query block size of a cache.
     * @return The block size
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        int old_state = blocked;
        if (!(blocked & flag)) {
            //Wasn't already blocked for this cause
            blocked |= flag;
            DPRINTF(Cache,"Blocking for cause %s\n", cause);
            if (!old_state)
                cpuSidePort->setBlocked();
        }
    }

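    // Illustrative sketch only (the miss-handling conditions named here are
    // assumptions about a derived cache, not part of this base class): a
    // concrete cache typically pairs setBlocked()/clearBlocked() around a
    // full/not-full transition of a resource, e.g.
    //
    //   if (mshrQueueIsFull)           // hypothetical condition in a subclass
    //       setBlocked(Blocked_NoMSHRs);
    //   ...
    //   if (mshrFreedAndQueueNotFull)  // hypothetical condition in a subclass
    //       clearBlocked(Blocked_NoMSHRs);
    //
    // Note that clearBlocked() may immediately let a previously blocked
    // request reach the cache (see the warning on clearBlocked() below).
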
    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        uint8_t old_state = blockedSnoop;
        if (!(blockedSnoop & flag)) {
            //Wasn't already blocked for this cause
            blockedSnoop |= flag;
            if (!old_state)
                memSidePort->setBlocked();
        }
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag)
        {
            blocked &= ~flag;
            if (!isBlocked()) {
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache,"Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag)
        {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }

    /**
     * True if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest() && !memSidePort->waitingOnRetry)
        {
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1<<cause;
        masterRequests |= flag;
    }

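    // Illustrative sketch only (the call site and latency are assumptions
    // about a derived cache): after queueing a miss in an MSHR, a concrete
    // cache would typically request the memory-side bus and clear the request
    // once the corresponding packet has been sent, e.g.
    //
    //   setMasterRequest(Request_MSHR, curTick + hitLatency); // hypothetical miss path
    //   ...   // bus arbitration; getPacket() supplies the packet to send
    //   clearMasterRequest(Request_MSHR);  // no more MSHR traffic pending
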
    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        masterRequests &= ~flag;
    }

    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
        {
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(cpuSidePort);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1<<cause;
        slaveRequests |= flag;
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        slaveRequests &= ~flag;
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(Packet *pkt, Tick time)
    {
        if (pkt->needsResponse()) {
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
        }
        else {
            if (pkt->cmd != Packet::UpgradeReq)
            {
                delete pkt->req;
                delete pkt;
            }
        }
    }

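    // Illustrative sketch only (the call site is an assumption about a
    // derived cache, not part of this base class): on a hit, a concrete
    // cache would hand the packet back to the CPU side once the hit latency
    // has elapsed, e.g.
    //
    //   respond(pkt, curTick + hitLatency);  // hypothetical hit path
    //
    // while a returning miss uses respondToMiss() below so the missLatency
    // statistic is credited as well.
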
    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(Packet *pkt, Tick time)
    {
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
        }
        if (pkt->needsResponse()) {
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
        }
        else {
            if (pkt->cmd != Packet::UpgradeReq)
            {
                delete pkt->req;
                delete pkt;
            }
        }
    }

    /**
     * Supplies the data if cache to cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     * @param time The time the response is ready.
     */
    void respondToSnoop(Packet *pkt, Tick time)
    {
        assert (pkt->needsResponse());
        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
        reqMem->schedule(time);
    }

    /**
     * Notification from master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

    void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
    {
        if (isCpuSide)
        {
            AddrRangeList dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        }
        else
        {
            //This is where snoops get updated
            AddrRangeList dummy;
            cpuSidePort->getPeerAddressRanges(dummy, snoop);
            return;
        }
    }
};

#endif //__BASE_CACHE_HH__