Merge ktlim@zizzer:/bk/newmem
[gem5.git] / src / mem / cache / base_cache.hh
1 /*
2 * Copyright (c) 2003-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 */
30
31 /**
32 * @file
33 * Declares a basic cache interface BaseCache.
34 */
35
36 #ifndef __BASE_CACHE_HH__
37 #define __BASE_CACHE_HH__
38
39 #include <vector>
40 #include <string>
41 #include <list>
42 #include <inttypes.h>
43
44 #include "base/misc.hh"
45 #include "base/statistics.hh"
46 #include "base/trace.hh"
47 #include "mem/mem_object.hh"
48 #include "mem/packet.hh"
49 #include "mem/port.hh"
50 #include "mem/request.hh"
51 #include "sim/eventq.hh"
52
/**
 * Reasons for Caches to be Blocked.
 * Each enumerator is used as a bit position in the cache's blocking
 * bit vectors (see BaseCache::blocked / BaseCache::blockedSnoop).
 */
enum BlockedCause{
    /** Blocked because no MSHR is available for a new miss. */
    Blocked_NoMSHRs,
    /** Blocked because an MSHR has no free target slots. */
    Blocked_NoTargets,
    /** Blocked because no write-back buffer is available. */
    Blocked_NoWBBuffers,
    /** Blocked by the coherence protocol. */
    Blocked_Coherence,
    /** Number of blocking causes; also sizes per-cause statistics vectors. */
    NUM_BLOCKED_CAUSES
};
63
/**
 * Reasons for cache to request a bus.
 * Each enumerator is used as a bit position in the cache's outstanding
 * request bit vectors (see BaseCache::masterRequests / slaveRequests).
 */
enum RequestCause{
    /** Bus needed to service an MSHR (miss) entry. */
    Request_MSHR,
    /** Bus needed to perform a write-back. */
    Request_WB,
    /** Bus needed for a coherence action. */
    Request_Coherence,
    /** Bus needed for a prefetch. */
    Request_PF
};
73
74 class MSHR;
75 /**
76 * A basic cache interface. Implements some common functions for speed.
77 */
78 class BaseCache : public MemObject
79 {
80 class CachePort : public Port
81 {
82 public:
83 BaseCache *cache;
84
85 CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
86
87 protected:
88 virtual bool recvTiming(PacketPtr pkt);
89
90 virtual Tick recvAtomic(PacketPtr pkt);
91
92 virtual void recvFunctional(PacketPtr pkt);
93
94 virtual void recvStatusChange(Status status);
95
96 virtual void getDeviceAddressRanges(AddrRangeList &resp,
97 AddrRangeList &snoop);
98
99 virtual int deviceBlockSize();
100
101 virtual void recvRetry();
102
103 public:
104 void setBlocked();
105
106 void clearBlocked();
107
108 bool canDrain() { return drainList.empty(); }
109
110 bool blocked;
111
112 bool mustSendRetry;
113
114 bool isCpuSide;
115
116 bool waitingOnRetry;
117
118 std::list<PacketPtr> drainList;
119
120 };
121
122 struct CacheEvent : public Event
123 {
124 CachePort *cachePort;
125 PacketPtr pkt;
126
127 CacheEvent(CachePort *_cachePort);
128 CacheEvent(CachePort *_cachePort, PacketPtr _pkt);
129 void process();
130 const char *description();
131 };
132
133 public: //Made public so coherence can get at it.
134 CachePort *cpuSidePort;
135
136 protected:
137 CachePort *memSidePort;
138
139 bool snoopRangesSent;
140
141 public:
142 virtual Port *getPort(const std::string &if_name, int idx = -1);
143
144 private:
145 //To be defined in cache_impl.hh not in base class
146 virtual bool doTimingAccess(PacketPtr pkt, CachePort *cachePort, bool isCpuSide)
147 {
148 fatal("No implementation");
149 }
150
151 virtual Tick doAtomicAccess(PacketPtr pkt, bool isCpuSide)
152 {
153 fatal("No implementation");
154 }
155
156 virtual void doFunctionalAccess(PacketPtr pkt, bool isCpuSide)
157 {
158 fatal("No implementation");
159 }
160
161 void recvStatusChange(Port::Status status, bool isCpuSide)
162 {
163 if (status == Port::RangeChange){
164 if (!isCpuSide) {
165 cpuSidePort->sendStatusChange(Port::RangeChange);
166 if (!snoopRangesSent) {
167 snoopRangesSent = true;
168 memSidePort->sendStatusChange(Port::RangeChange);
169 }
170 }
171 else {
172 memSidePort->sendStatusChange(Port::RangeChange);
173 }
174 }
175 }
176
177 virtual PacketPtr getPacket()
178 {
179 fatal("No implementation");
180 }
181
182 virtual PacketPtr getCoherencePacket()
183 {
184 fatal("No implementation");
185 }
186
187 virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
188 {
189
190 fatal("No implementation");
191 }
192
193 virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success)
194 {
195
196 fatal("No implementation");
197 }
198
199 /**
200 * Bit vector of the blocking reasons for the access path.
201 * @sa #BlockedCause
202 */
203 uint8_t blocked;
204
205 /**
206 * Bit vector for the blocking reasons for the snoop path.
207 * @sa #BlockedCause
208 */
209 uint8_t blockedSnoop;
210
211 /**
212 * Bit vector for the outstanding requests for the master interface.
213 */
214 uint8_t masterRequests;
215
216 /**
217 * Bit vector for the outstanding requests for the slave interface.
218 */
219 uint8_t slaveRequests;
220
221 protected:
222
223 /** Stores time the cache blocked for statistics. */
224 Tick blockedCycle;
225
226 /** Block size of this cache */
227 const int blkSize;
228
229 /** The number of misses to trigger an exit event. */
230 Counter missCount;
231
232 /** The drain event. */
233 Event *drainEvent;
234
235 public:
236 // Statistics
237 /**
238 * @addtogroup CacheStatistics
239 * @{
240 */
241
242 /** Number of hits per thread for each type of command. @sa Packet::Command */
243 Stats::Vector<> hits[NUM_MEM_CMDS];
244 /** Number of hits for demand accesses. */
245 Stats::Formula demandHits;
246 /** Number of hit for all accesses. */
247 Stats::Formula overallHits;
248
249 /** Number of misses per thread for each type of command. @sa Packet::Command */
250 Stats::Vector<> misses[NUM_MEM_CMDS];
251 /** Number of misses for demand accesses. */
252 Stats::Formula demandMisses;
253 /** Number of misses for all accesses. */
254 Stats::Formula overallMisses;
255
256 /**
257 * Total number of cycles per thread/command spent waiting for a miss.
258 * Used to calculate the average miss latency.
259 */
260 Stats::Vector<> missLatency[NUM_MEM_CMDS];
261 /** Total number of cycles spent waiting for demand misses. */
262 Stats::Formula demandMissLatency;
263 /** Total number of cycles spent waiting for all misses. */
264 Stats::Formula overallMissLatency;
265
266 /** The number of accesses per command and thread. */
267 Stats::Formula accesses[NUM_MEM_CMDS];
268 /** The number of demand accesses. */
269 Stats::Formula demandAccesses;
270 /** The number of overall accesses. */
271 Stats::Formula overallAccesses;
272
273 /** The miss rate per command and thread. */
274 Stats::Formula missRate[NUM_MEM_CMDS];
275 /** The miss rate of all demand accesses. */
276 Stats::Formula demandMissRate;
277 /** The miss rate for all accesses. */
278 Stats::Formula overallMissRate;
279
280 /** The average miss latency per command and thread. */
281 Stats::Formula avgMissLatency[NUM_MEM_CMDS];
282 /** The average miss latency for demand misses. */
283 Stats::Formula demandAvgMissLatency;
284 /** The average miss latency for all misses. */
285 Stats::Formula overallAvgMissLatency;
286
287 /** The total number of cycles blocked for each blocked cause. */
288 Stats::Vector<> blocked_cycles;
289 /** The number of times this cache blocked for each blocked cause. */
290 Stats::Vector<> blocked_causes;
291
292 /** The average number of cycles blocked for each blocked cause. */
293 Stats::Formula avg_blocked;
294
295 /** The number of fast writes (WH64) performed. */
296 Stats::Scalar<> fastWrites;
297
298 /** The number of cache copies performed. */
299 Stats::Scalar<> cacheCopies;
300
301 /**
302 * @}
303 */
304
305 /**
306 * Register stats for this object.
307 */
308 virtual void regStats();
309
310 public:
311
312 class Params
313 {
314 public:
315 /** List of address ranges of this cache. */
316 std::vector<Range<Addr> > addrRange;
317 /** The hit latency for this cache. */
318 int hitLatency;
319 /** The block size of this cache. */
320 int blkSize;
321 /**
322 * The maximum number of misses this cache should handle before
323 * ending the simulation.
324 */
325 Counter maxMisses;
326
327 /**
328 * Construct an instance of this parameter class.
329 */
330 Params(std::vector<Range<Addr> > addr_range,
331 int hit_latency, int _blkSize, Counter max_misses)
332 : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
333 maxMisses(max_misses)
334 {
335 }
336 };
337
338 /**
339 * Create and initialize a basic cache object.
340 * @param name The name of this cache.
341 * @param hier_params Pointer to the HierParams object for this hierarchy
342 * of this cache.
343 * @param params The parameter object for this BaseCache.
344 */
345 BaseCache(const std::string &name, Params &params)
346 : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
347 slaveRequests(0), blkSize(params.blkSize),
348 missCount(params.maxMisses), drainEvent(NULL)
349 {
350 //Start ports at null if more than one is created we should panic
351 cpuSidePort = NULL;
352 memSidePort = NULL;
353 snoopRangesSent = false;
354 }
355
356 virtual void init();
357
358 /**
359 * Query block size of a cache.
360 * @return The block size
361 */
362 int getBlockSize() const
363 {
364 return blkSize;
365 }
366
367 /**
368 * Returns true if the cache is blocked for accesses.
369 */
370 bool isBlocked()
371 {
372 return blocked != 0;
373 }
374
375 /**
376 * Returns true if the cache is blocked for snoops.
377 */
378 bool isBlockedForSnoop()
379 {
380 return blockedSnoop != 0;
381 }
382
383 /**
384 * Marks the access path of the cache as blocked for the given cause. This
385 * also sets the blocked flag in the slave interface.
386 * @param cause The reason for the cache blocking.
387 */
388 void setBlocked(BlockedCause cause)
389 {
390 uint8_t flag = 1 << cause;
391 if (blocked == 0) {
392 blocked_causes[cause]++;
393 blockedCycle = curTick;
394 }
395 int old_state = blocked;
396 if (!(blocked & flag)) {
397 //Wasn't already blocked for this cause
398 blocked |= flag;
399 DPRINTF(Cache,"Blocking for cause %s\n", cause);
400 if (!old_state)
401 cpuSidePort->setBlocked();
402 }
403 }
404
405 /**
406 * Marks the snoop path of the cache as blocked for the given cause. This
407 * also sets the blocked flag in the master interface.
408 * @param cause The reason to block the snoop path.
409 */
410 void setBlockedForSnoop(BlockedCause cause)
411 {
412 uint8_t flag = 1 << cause;
413 uint8_t old_state = blockedSnoop;
414 if (!(blockedSnoop & flag)) {
415 //Wasn't already blocked for this cause
416 blockedSnoop |= flag;
417 if (!old_state)
418 memSidePort->setBlocked();
419 }
420 }
421
422 /**
423 * Marks the cache as unblocked for the given cause. This also clears the
424 * blocked flags in the appropriate interfaces.
425 * @param cause The newly unblocked cause.
426 * @warning Calling this function can cause a blocked request on the bus to
427 * access the cache. The cache must be in a state to handle that request.
428 */
429 void clearBlocked(BlockedCause cause)
430 {
431 uint8_t flag = 1 << cause;
432 DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
433 cause, blocked);
434 if (blocked & flag)
435 {
436 blocked &= ~flag;
437 if (!isBlocked()) {
438 blocked_cycles[cause] += curTick - blockedCycle;
439 DPRINTF(Cache,"Unblocking from all causes\n");
440 cpuSidePort->clearBlocked();
441 }
442 }
443 if (blockedSnoop & flag)
444 {
445 blockedSnoop &= ~flag;
446 if (!isBlockedForSnoop()) {
447 memSidePort->clearBlocked();
448 }
449 }
450 }
451
452 /**
453 * True if the master bus should be requested.
454 * @return True if there are outstanding requests for the master bus.
455 */
456 bool doMasterRequest()
457 {
458 return masterRequests != 0;
459 }
460
461 /**
462 * Request the master bus for the given cause and time.
463 * @param cause The reason for the request.
464 * @param time The time to make the request.
465 */
466 void setMasterRequest(RequestCause cause, Tick time)
467 {
468 if (!doMasterRequest() && !memSidePort->waitingOnRetry)
469 {
470 BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
471 reqCpu->schedule(time);
472 }
473 uint8_t flag = 1<<cause;
474 masterRequests |= flag;
475 }
476
477 /**
478 * Clear the master bus request for the given cause.
479 * @param cause The request reason to clear.
480 */
481 void clearMasterRequest(RequestCause cause)
482 {
483 uint8_t flag = 1<<cause;
484 masterRequests &= ~flag;
485 checkDrain();
486 }
487
488 /**
489 * Return true if the slave bus should be requested.
490 * @return True if there are outstanding requests for the slave bus.
491 */
492 bool doSlaveRequest()
493 {
494 return slaveRequests != 0;
495 }
496
497 /**
498 * Request the slave bus for the given reason and time.
499 * @param cause The reason for the request.
500 * @param time The time to make the request.
501 */
502 void setSlaveRequest(RequestCause cause, Tick time)
503 {
504 if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
505 {
506 BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(cpuSidePort);
507 reqCpu->schedule(time);
508 }
509 uint8_t flag = 1<<cause;
510 slaveRequests |= flag;
511 }
512
513 /**
514 * Clear the slave bus request for the given reason.
515 * @param cause The request reason to clear.
516 */
517 void clearSlaveRequest(RequestCause cause)
518 {
519 uint8_t flag = 1<<cause;
520 slaveRequests &= ~flag;
521 checkDrain();
522 }
523
524 /**
525 * Send a response to the slave interface.
526 * @param pkt The request being responded to.
527 * @param time The time the response is ready.
528 */
529 void respond(PacketPtr pkt, Tick time)
530 {
531 if (pkt->needsResponse()) {
532 CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
533 reqCpu->schedule(time);
534 }
535 else {
536 if (pkt->cmd != Packet::UpgradeReq)
537 {
538 delete pkt->req;
539 delete pkt;
540 }
541 }
542 }
543
544 /**
545 * Send a reponse to the slave interface and calculate miss latency.
546 * @param pkt The request to respond to.
547 * @param time The time the response is ready.
548 */
549 void respondToMiss(PacketPtr pkt, Tick time)
550 {
551 if (!pkt->req->isUncacheable()) {
552 missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
553 }
554 if (pkt->needsResponse()) {
555 CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
556 reqCpu->schedule(time);
557 }
558 else {
559 if (pkt->cmd != Packet::UpgradeReq)
560 {
561 delete pkt->req;
562 delete pkt;
563 }
564 }
565 }
566
567 /**
568 * Suppliess the data if cache to cache transfers are enabled.
569 * @param pkt The bus transaction to fulfill.
570 */
571 void respondToSnoop(PacketPtr pkt, Tick time)
572 {
573 assert (pkt->needsResponse());
574 CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
575 reqMem->schedule(time);
576 }
577
578 /**
579 * Notification from master interface that a address range changed. Nothing
580 * to do for a cache.
581 */
582 void rangeChange() {}
583
584 void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
585 {
586 if (isCpuSide)
587 {
588 AddrRangeList dummy;
589 memSidePort->getPeerAddressRanges(resp, dummy);
590 }
591 else
592 {
593 //This is where snoops get updated
594 AddrRangeList dummy;
595 cpuSidePort->getPeerAddressRanges(dummy, snoop);
596 return;
597 }
598 }
599
600 virtual unsigned int drain(Event *de);
601
602 void checkDrain()
603 {
604 if (drainEvent && canDrain()) {
605 drainEvent->process();
606 changeState(SimObject::Drained);
607 // Clear the drain event
608 drainEvent = NULL;
609 }
610 }
611
612 bool canDrain()
613 {
614 if (doMasterRequest() || doSlaveRequest()) {
615 return false;
616 } else if (memSidePort && !memSidePort->canDrain()) {
617 return false;
618 } else if (cpuSidePort && !cpuSidePort->canDrain()) {
619 return false;
620 }
621 return true;
622 }
623 };
624
625 #endif //__BASE_CACHE_HH__