// File: src/mem/cache/base_cache.hh (gem5; from merge of zizzer:/bk/newmem)
1 /*
2 * Copyright (c) 2003-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Erik Hallnor
29 */
30
31 /**
32 * @file
33 * Declares a basic cache interface BaseCache.
34 */
35
36 #ifndef __BASE_CACHE_HH__
37 #define __BASE_CACHE_HH__
38
39 #include <vector>
40 #include <string>
41 #include <list>
42 #include <inttypes.h>
43
44 #include "base/misc.hh"
45 #include "base/statistics.hh"
46 #include "base/trace.hh"
47 #include "mem/mem_object.hh"
48 #include "mem/packet.hh"
49 #include "mem/port.hh"
50 #include "mem/request.hh"
51 #include "sim/eventq.hh"
52
53 /**
54 * Reasons for Caches to be Blocked.
55 */
56 enum BlockedCause{
57 Blocked_NoMSHRs,
58 Blocked_NoTargets,
59 Blocked_NoWBBuffers,
60 Blocked_Coherence,
61 NUM_BLOCKED_CAUSES
62 };
63
64 /**
65 * Reasons for cache to request a bus.
66 */
67 enum RequestCause{
68 Request_MSHR,
69 Request_WB,
70 Request_Coherence,
71 Request_PF
72 };
73
74 class MSHR;
75 /**
76 * A basic cache interface. Implements some common functions for speed.
77 */
78 class BaseCache : public MemObject
79 {
80 class CachePort : public Port
81 {
82 public:
83 BaseCache *cache;
84
85 CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
86
87 protected:
88 virtual bool recvTiming(PacketPtr pkt);
89
90 virtual Tick recvAtomic(PacketPtr pkt);
91
92 virtual void recvFunctional(PacketPtr pkt);
93
94 virtual void recvStatusChange(Status status);
95
96 virtual void getDeviceAddressRanges(AddrRangeList &resp,
97 AddrRangeList &snoop);
98
99 virtual int deviceBlockSize();
100
101 virtual void recvRetry();
102
103 public:
104 void setBlocked();
105
106 void clearBlocked();
107
108 bool blocked;
109
110 bool mustSendRetry;
111
112 bool isCpuSide;
113
114 bool waitingOnRetry;
115
116 std::list<PacketPtr> drainList;
117
118 };
119
120 struct CacheEvent : public Event
121 {
122 CachePort *cachePort;
123 PacketPtr pkt;
124
125 CacheEvent(CachePort *_cachePort);
126 CacheEvent(CachePort *_cachePort, PacketPtr _pkt);
127 void process();
128 const char *description();
129 };
130
131 public: //Made public so coherence can get at it.
132 CachePort *cpuSidePort;
133
134 protected:
135 CachePort *memSidePort;
136
137 bool snoopRangesSent;
138
139 public:
140 virtual Port *getPort(const std::string &if_name, int idx = -1);
141
142 private:
143 //To be defined in cache_impl.hh not in base class
144 virtual bool doTimingAccess(PacketPtr pkt, CachePort *cachePort, bool isCpuSide)
145 {
146 fatal("No implementation");
147 }
148
149 virtual Tick doAtomicAccess(PacketPtr pkt, bool isCpuSide)
150 {
151 fatal("No implementation");
152 }
153
154 virtual void doFunctionalAccess(PacketPtr pkt, bool isCpuSide)
155 {
156 fatal("No implementation");
157 }
158
159 void recvStatusChange(Port::Status status, bool isCpuSide)
160 {
161 if (status == Port::RangeChange){
162 if (!isCpuSide) {
163 cpuSidePort->sendStatusChange(Port::RangeChange);
164 if (!snoopRangesSent) {
165 snoopRangesSent = true;
166 memSidePort->sendStatusChange(Port::RangeChange);
167 }
168 }
169 else {
170 memSidePort->sendStatusChange(Port::RangeChange);
171 }
172 }
173 }
174
175 virtual PacketPtr getPacket()
176 {
177 fatal("No implementation");
178 }
179
180 virtual PacketPtr getCoherencePacket()
181 {
182 fatal("No implementation");
183 }
184
185 virtual void sendResult(PacketPtr &pkt, MSHR* mshr, bool success)
186 {
187
188 fatal("No implementation");
189 }
190
191 virtual void sendCoherenceResult(PacketPtr &pkt, MSHR* mshr, bool success)
192 {
193
194 fatal("No implementation");
195 }
196
197 /**
198 * Bit vector of the blocking reasons for the access path.
199 * @sa #BlockedCause
200 */
201 uint8_t blocked;
202
203 /**
204 * Bit vector for the blocking reasons for the snoop path.
205 * @sa #BlockedCause
206 */
207 uint8_t blockedSnoop;
208
209 /**
210 * Bit vector for the outstanding requests for the master interface.
211 */
212 uint8_t masterRequests;
213
214 /**
215 * Bit vector for the outstanding requests for the slave interface.
216 */
217 uint8_t slaveRequests;
218
219 protected:
220
221 /** Stores time the cache blocked for statistics. */
222 Tick blockedCycle;
223
224 /** Block size of this cache */
225 const int blkSize;
226
227 /** The number of misses to trigger an exit event. */
228 Counter missCount;
229
230 public:
231 // Statistics
232 /**
233 * @addtogroup CacheStatistics
234 * @{
235 */
236
237 /** Number of hits per thread for each type of command. @sa Packet::Command */
238 Stats::Vector<> hits[NUM_MEM_CMDS];
239 /** Number of hits for demand accesses. */
240 Stats::Formula demandHits;
241 /** Number of hit for all accesses. */
242 Stats::Formula overallHits;
243
244 /** Number of misses per thread for each type of command. @sa Packet::Command */
245 Stats::Vector<> misses[NUM_MEM_CMDS];
246 /** Number of misses for demand accesses. */
247 Stats::Formula demandMisses;
248 /** Number of misses for all accesses. */
249 Stats::Formula overallMisses;
250
251 /**
252 * Total number of cycles per thread/command spent waiting for a miss.
253 * Used to calculate the average miss latency.
254 */
255 Stats::Vector<> missLatency[NUM_MEM_CMDS];
256 /** Total number of cycles spent waiting for demand misses. */
257 Stats::Formula demandMissLatency;
258 /** Total number of cycles spent waiting for all misses. */
259 Stats::Formula overallMissLatency;
260
261 /** The number of accesses per command and thread. */
262 Stats::Formula accesses[NUM_MEM_CMDS];
263 /** The number of demand accesses. */
264 Stats::Formula demandAccesses;
265 /** The number of overall accesses. */
266 Stats::Formula overallAccesses;
267
268 /** The miss rate per command and thread. */
269 Stats::Formula missRate[NUM_MEM_CMDS];
270 /** The miss rate of all demand accesses. */
271 Stats::Formula demandMissRate;
272 /** The miss rate for all accesses. */
273 Stats::Formula overallMissRate;
274
275 /** The average miss latency per command and thread. */
276 Stats::Formula avgMissLatency[NUM_MEM_CMDS];
277 /** The average miss latency for demand misses. */
278 Stats::Formula demandAvgMissLatency;
279 /** The average miss latency for all misses. */
280 Stats::Formula overallAvgMissLatency;
281
282 /** The total number of cycles blocked for each blocked cause. */
283 Stats::Vector<> blocked_cycles;
284 /** The number of times this cache blocked for each blocked cause. */
285 Stats::Vector<> blocked_causes;
286
287 /** The average number of cycles blocked for each blocked cause. */
288 Stats::Formula avg_blocked;
289
290 /** The number of fast writes (WH64) performed. */
291 Stats::Scalar<> fastWrites;
292
293 /** The number of cache copies performed. */
294 Stats::Scalar<> cacheCopies;
295
296 /**
297 * @}
298 */
299
300 /**
301 * Register stats for this object.
302 */
303 virtual void regStats();
304
305 public:
306
307 class Params
308 {
309 public:
310 /** List of address ranges of this cache. */
311 std::vector<Range<Addr> > addrRange;
312 /** The hit latency for this cache. */
313 int hitLatency;
314 /** The block size of this cache. */
315 int blkSize;
316 /**
317 * The maximum number of misses this cache should handle before
318 * ending the simulation.
319 */
320 Counter maxMisses;
321
322 /**
323 * Construct an instance of this parameter class.
324 */
325 Params(std::vector<Range<Addr> > addr_range,
326 int hit_latency, int _blkSize, Counter max_misses)
327 : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
328 maxMisses(max_misses)
329 {
330 }
331 };
332
333 /**
334 * Create and initialize a basic cache object.
335 * @param name The name of this cache.
336 * @param hier_params Pointer to the HierParams object for this hierarchy
337 * of this cache.
338 * @param params The parameter object for this BaseCache.
339 */
340 BaseCache(const std::string &name, Params &params)
341 : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
342 slaveRequests(0), blkSize(params.blkSize),
343 missCount(params.maxMisses)
344 {
345 //Start ports at null if more than one is created we should panic
346 cpuSidePort = NULL;
347 memSidePort = NULL;
348 snoopRangesSent = false;
349 }
350
351 virtual void init();
352
353 /**
354 * Query block size of a cache.
355 * @return The block size
356 */
357 int getBlockSize() const
358 {
359 return blkSize;
360 }
361
362 /**
363 * Returns true if the cache is blocked for accesses.
364 */
365 bool isBlocked()
366 {
367 return blocked != 0;
368 }
369
370 /**
371 * Returns true if the cache is blocked for snoops.
372 */
373 bool isBlockedForSnoop()
374 {
375 return blockedSnoop != 0;
376 }
377
378 /**
379 * Marks the access path of the cache as blocked for the given cause. This
380 * also sets the blocked flag in the slave interface.
381 * @param cause The reason for the cache blocking.
382 */
383 void setBlocked(BlockedCause cause)
384 {
385 uint8_t flag = 1 << cause;
386 if (blocked == 0) {
387 blocked_causes[cause]++;
388 blockedCycle = curTick;
389 }
390 int old_state = blocked;
391 if (!(blocked & flag)) {
392 //Wasn't already blocked for this cause
393 blocked |= flag;
394 DPRINTF(Cache,"Blocking for cause %s\n", cause);
395 if (!old_state)
396 cpuSidePort->setBlocked();
397 }
398 }
399
400 /**
401 * Marks the snoop path of the cache as blocked for the given cause. This
402 * also sets the blocked flag in the master interface.
403 * @param cause The reason to block the snoop path.
404 */
405 void setBlockedForSnoop(BlockedCause cause)
406 {
407 uint8_t flag = 1 << cause;
408 uint8_t old_state = blockedSnoop;
409 if (!(blockedSnoop & flag)) {
410 //Wasn't already blocked for this cause
411 blockedSnoop |= flag;
412 if (!old_state)
413 memSidePort->setBlocked();
414 }
415 }
416
417 /**
418 * Marks the cache as unblocked for the given cause. This also clears the
419 * blocked flags in the appropriate interfaces.
420 * @param cause The newly unblocked cause.
421 * @warning Calling this function can cause a blocked request on the bus to
422 * access the cache. The cache must be in a state to handle that request.
423 */
424 void clearBlocked(BlockedCause cause)
425 {
426 uint8_t flag = 1 << cause;
427 DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
428 cause, blocked);
429 if (blocked & flag)
430 {
431 blocked &= ~flag;
432 if (!isBlocked()) {
433 blocked_cycles[cause] += curTick - blockedCycle;
434 DPRINTF(Cache,"Unblocking from all causes\n");
435 cpuSidePort->clearBlocked();
436 }
437 }
438 if (blockedSnoop & flag)
439 {
440 blockedSnoop &= ~flag;
441 if (!isBlockedForSnoop()) {
442 memSidePort->clearBlocked();
443 }
444 }
445 }
446
447 /**
448 * True if the master bus should be requested.
449 * @return True if there are outstanding requests for the master bus.
450 */
451 bool doMasterRequest()
452 {
453 return masterRequests != 0;
454 }
455
456 /**
457 * Request the master bus for the given cause and time.
458 * @param cause The reason for the request.
459 * @param time The time to make the request.
460 */
461 void setMasterRequest(RequestCause cause, Tick time)
462 {
463 if (!doMasterRequest() && !memSidePort->waitingOnRetry)
464 {
465 BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
466 reqCpu->schedule(time);
467 }
468 uint8_t flag = 1<<cause;
469 masterRequests |= flag;
470 }
471
472 /**
473 * Clear the master bus request for the given cause.
474 * @param cause The request reason to clear.
475 */
476 void clearMasterRequest(RequestCause cause)
477 {
478 uint8_t flag = 1<<cause;
479 masterRequests &= ~flag;
480 }
481
482 /**
483 * Return true if the slave bus should be requested.
484 * @return True if there are outstanding requests for the slave bus.
485 */
486 bool doSlaveRequest()
487 {
488 return slaveRequests != 0;
489 }
490
491 /**
492 * Request the slave bus for the given reason and time.
493 * @param cause The reason for the request.
494 * @param time The time to make the request.
495 */
496 void setSlaveRequest(RequestCause cause, Tick time)
497 {
498 if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry)
499 {
500 BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(cpuSidePort);
501 reqCpu->schedule(time);
502 }
503 uint8_t flag = 1<<cause;
504 slaveRequests |= flag;
505 }
506
507 /**
508 * Clear the slave bus request for the given reason.
509 * @param cause The request reason to clear.
510 */
511 void clearSlaveRequest(RequestCause cause)
512 {
513 uint8_t flag = 1<<cause;
514 slaveRequests &= ~flag;
515 }
516
517 /**
518 * Send a response to the slave interface.
519 * @param pkt The request being responded to.
520 * @param time The time the response is ready.
521 */
522 void respond(PacketPtr pkt, Tick time)
523 {
524 if (pkt->needsResponse()) {
525 CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
526 reqCpu->schedule(time);
527 }
528 else {
529 if (pkt->cmd != Packet::UpgradeReq)
530 {
531 delete pkt->req;
532 delete pkt;
533 }
534 }
535 }
536
537 /**
538 * Send a reponse to the slave interface and calculate miss latency.
539 * @param pkt The request to respond to.
540 * @param time The time the response is ready.
541 */
542 void respondToMiss(PacketPtr pkt, Tick time)
543 {
544 if (!pkt->req->isUncacheable()) {
545 missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
546 }
547 if (pkt->needsResponse()) {
548 CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
549 reqCpu->schedule(time);
550 }
551 else {
552 if (pkt->cmd != Packet::UpgradeReq)
553 {
554 delete pkt->req;
555 delete pkt;
556 }
557 }
558 }
559
560 /**
561 * Suppliess the data if cache to cache transfers are enabled.
562 * @param pkt The bus transaction to fulfill.
563 */
564 void respondToSnoop(PacketPtr pkt, Tick time)
565 {
566 assert (pkt->needsResponse());
567 CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
568 reqMem->schedule(time);
569 }
570
571 /**
572 * Notification from master interface that a address range changed. Nothing
573 * to do for a cache.
574 */
575 void rangeChange() {}
576
577 void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
578 {
579 if (isCpuSide)
580 {
581 AddrRangeList dummy;
582 memSidePort->getPeerAddressRanges(resp, dummy);
583 }
584 else
585 {
586 //This is where snoops get updated
587 AddrRangeList dummy;
588 cpuSidePort->getPeerAddressRanges(dummy, snoop);
589 return;
590 }
591 }
592 };
593
594 #endif //__BASE_CACHE_HH__