Remove threadnum from cache everywhere for now
[gem5.git] / src / mem / cache / base_cache.hh
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */
35
36 #ifndef __BASE_CACHE_HH__
37 #define __BASE_CACHE_HH__
38
39 #include <vector>
40 #include <string>
41 #include <list>
42 #include <inttypes.h>
43
44 #include "base/misc.hh"
45 #include "base/statistics.hh"
46 #include "base/trace.hh"
47 #include "mem/mem_object.hh"
48 #include "mem/packet.hh"
49 #include "mem/port.hh"
50 #include "mem/request.hh"
51 #include "sim/eventq.hh"
52
53 /**
54 * Reasons for Caches to be Blocked.
55 */
56 enum BlockedCause{
57 Blocked_NoMSHRs,
58 Blocked_NoTargets,
59 Blocked_NoWBBuffers,
60 Blocked_Coherence,
61 Blocked_Copy,
62 NUM_BLOCKED_CAUSES
63 };
64
65 /**
66 * Reasons for cache to request a bus.
67 */
68 enum RequestCause{
69 Request_MSHR,
70 Request_WB,
71 Request_Coherence,
72 Request_PF
73 };
74
75 /**
76 * A basic cache interface. Implements some common functions for speed.
77 */
78 class BaseCache : public MemObject
79 {
80 class CachePort : public Port
81 {
82 public:
83 BaseCache *cache;
84
85 CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
86
87 protected:
88 virtual bool recvTiming(Packet *pkt);
89
90 virtual Tick recvAtomic(Packet *pkt);
91
92 virtual void recvFunctional(Packet *pkt);
93
94 virtual void recvStatusChange(Status status);
95
96 virtual void getDeviceAddressRanges(AddrRangeList &resp,
97 AddrRangeList &snoop);
98
99 virtual int deviceBlockSize();
100
101 virtual void recvRetry();
102
103 public:
104 void setBlocked();
105
106 void clearBlocked();
107
108 bool blocked;
109
110 bool mustSendRetry;
111
112 bool isCpuSide;
113 };
114
115 struct CacheEvent : public Event
116 {
117 CachePort *cachePort;
118 Packet *pkt;
119
120 CacheEvent(CachePort *_cachePort);
121 CacheEvent(CachePort *_cachePort, Packet *_pkt);
122 void process();
123 const char *description();
124 };
125
126 protected:
127 CachePort *cpuSidePort;
128 CachePort *memSidePort;
129
130 bool snoopRangesSent;
131
132 public:
133 virtual Port *getPort(const std::string &if_name, int idx = -1);
134
135 private:
136 //To be defined in cache_impl.hh not in base class
137 virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
138 {
139 fatal("No implementation");
140 }
141
142 virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide)
143 {
144 fatal("No implementation");
145 }
146
147 virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide)
148 {
149 fatal("No implementation");
150 }
151
152 void recvStatusChange(Port::Status status, bool isCpuSide)
153 {
154 if (status == Port::RangeChange){
155 if (!isCpuSide) {
156 cpuSidePort->sendStatusChange(Port::RangeChange);
157 if (topLevelCache && !snoopRangesSent) {
158 snoopRangesSent = true;
159 memSidePort->sendStatusChange(Port::RangeChange);
160 }
161 }
162 else {
163 memSidePort->sendStatusChange(Port::RangeChange);
164 }
165 }
166 else if (status == Port::SnoopSquash) {
167 assert(snoopPhase2);
168 snoopPhase2 = false;
169 }
170 }
171
172 virtual Packet *getPacket()
173 {
174 fatal("No implementation");
175 }
176
177 virtual Packet *getCoherencePacket()
178 {
179 fatal("No implementation");
180 }
181
182 virtual void sendResult(Packet* &pkt, bool success)
183 {
184
185 fatal("No implementation");
186 }
187
188 /**
189 * Bit vector of the blocking reasons for the access path.
190 * @sa #BlockedCause
191 */
192 uint8_t blocked;
193
194 /**
195 * Bit vector for the blocking reasons for the snoop path.
196 * @sa #BlockedCause
197 */
198 uint8_t blockedSnoop;
199
200 /**
201 * Bit vector for the outstanding requests for the master interface.
202 */
203 uint8_t masterRequests;
204
205 /**
206 * Bit vector for the outstanding requests for the slave interface.
207 */
208 uint8_t slaveRequests;
209
210 protected:
211
212 /** True if this cache is connected to the CPU. */
213 bool topLevelCache;
214
215
216 /** True if we are now in phase 2 of the snoop process. */
217 bool snoopPhase2;
218
219 /** Stores time the cache blocked for statistics. */
220 Tick blockedCycle;
221
222 /** Block size of this cache */
223 const int blkSize;
224
225 /** The number of misses to trigger an exit event. */
226 Counter missCount;
227
228 public:
229 // Statistics
230 /**
231 * @addtogroup CacheStatistics
232 * @{
233 */
234
235 /** Number of hits per thread for each type of command. @sa Packet::Command */
236 Stats::Vector<> hits[NUM_MEM_CMDS];
237 /** Number of hits for demand accesses. */
238 Stats::Formula demandHits;
239 /** Number of hit for all accesses. */
240 Stats::Formula overallHits;
241
242 /** Number of misses per thread for each type of command. @sa Packet::Command */
243 Stats::Vector<> misses[NUM_MEM_CMDS];
244 /** Number of misses for demand accesses. */
245 Stats::Formula demandMisses;
246 /** Number of misses for all accesses. */
247 Stats::Formula overallMisses;
248
249 /**
250 * Total number of cycles per thread/command spent waiting for a miss.
251 * Used to calculate the average miss latency.
252 */
253 Stats::Vector<> missLatency[NUM_MEM_CMDS];
254 /** Total number of cycles spent waiting for demand misses. */
255 Stats::Formula demandMissLatency;
256 /** Total number of cycles spent waiting for all misses. */
257 Stats::Formula overallMissLatency;
258
259 /** The number of accesses per command and thread. */
260 Stats::Formula accesses[NUM_MEM_CMDS];
261 /** The number of demand accesses. */
262 Stats::Formula demandAccesses;
263 /** The number of overall accesses. */
264 Stats::Formula overallAccesses;
265
266 /** The miss rate per command and thread. */
267 Stats::Formula missRate[NUM_MEM_CMDS];
268 /** The miss rate of all demand accesses. */
269 Stats::Formula demandMissRate;
270 /** The miss rate for all accesses. */
271 Stats::Formula overallMissRate;
272
273 /** The average miss latency per command and thread. */
274 Stats::Formula avgMissLatency[NUM_MEM_CMDS];
275 /** The average miss latency for demand misses. */
276 Stats::Formula demandAvgMissLatency;
277 /** The average miss latency for all misses. */
278 Stats::Formula overallAvgMissLatency;
279
280 /** The total number of cycles blocked for each blocked cause. */
281 Stats::Vector<> blocked_cycles;
282 /** The number of times this cache blocked for each blocked cause. */
283 Stats::Vector<> blocked_causes;
284
285 /** The average number of cycles blocked for each blocked cause. */
286 Stats::Formula avg_blocked;
287
288 /** The number of fast writes (WH64) performed. */
289 Stats::Scalar<> fastWrites;
290
291 /** The number of cache copies performed. */
292 Stats::Scalar<> cacheCopies;
293
294 /**
295 * @}
296 */
297
298 /**
299 * Register stats for this object.
300 */
301 virtual void regStats();
302
303 public:
304
305 class Params
306 {
307 public:
308 /** List of address ranges of this cache. */
309 std::vector<Range<Addr> > addrRange;
310 /** The hit latency for this cache. */
311 int hitLatency;
312 /** The block size of this cache. */
313 int blkSize;
314 /**
315 * The maximum number of misses this cache should handle before
316 * ending the simulation.
317 */
318 Counter maxMisses;
319
320 /**
321 * Construct an instance of this parameter class.
322 */
323 Params(std::vector<Range<Addr> > addr_range,
324 int hit_latency, int _blkSize, Counter max_misses)
325 : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
326 maxMisses(max_misses)
327 {
328 }
329 };
330
331 /**
332 * Create and initialize a basic cache object.
333 * @param name The name of this cache.
334 * @param hier_params Pointer to the HierParams object for this hierarchy
335 * of this cache.
336 * @param params The parameter object for this BaseCache.
337 */
338 BaseCache(const std::string &name, Params &params)
339 : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
340 slaveRequests(0), topLevelCache(false), blkSize(params.blkSize),
341 missCount(params.maxMisses)
342 {
343 //Start ports at null if more than one is created we should panic
344 cpuSidePort = NULL;
345 memSidePort = NULL;
346 snoopRangesSent = false;
347 }
348
349 virtual void init();
350
351 /**
352 * Query block size of a cache.
353 * @return The block size
354 */
355 int getBlockSize() const
356 {
357 return blkSize;
358 }
359
360 /**
361 * Returns true if this cache is connect to the CPU.
362 * @return True if this is a L1 cache.
363 */
364 bool isTopLevel()
365 {
366 return topLevelCache;
367 }
368
369 /**
370 * Returns true if the cache is blocked for accesses.
371 */
372 bool isBlocked()
373 {
374 return blocked != 0;
375 }
376
377 /**
378 * Returns true if the cache is blocked for snoops.
379 */
380 bool isBlockedForSnoop()
381 {
382 return blockedSnoop != 0;
383 }
384
385 /**
386 * Marks the access path of the cache as blocked for the given cause. This
387 * also sets the blocked flag in the slave interface.
388 * @param cause The reason for the cache blocking.
389 */
390 void setBlocked(BlockedCause cause)
391 {
392 uint8_t flag = 1 << cause;
393 if (blocked == 0) {
394 blocked_causes[cause]++;
395 blockedCycle = curTick;
396 }
397 if (!(blocked & flag)) {
398 //Wasn't already blocked for this cause
399 blocked |= flag;
400 DPRINTF(Cache,"Blocking for cause %s\n", cause);
401 cpuSidePort->setBlocked();
402 }
403 }
404
405 /**
406 * Marks the snoop path of the cache as blocked for the given cause. This
407 * also sets the blocked flag in the master interface.
408 * @param cause The reason to block the snoop path.
409 */
410 void setBlockedForSnoop(BlockedCause cause)
411 {
412 uint8_t flag = 1 << cause;
413 if (!(blocked & flag)) {
414 //Wasn't already blocked for this cause
415 blockedSnoop |= flag;
416 memSidePort->setBlocked();
417 }
418 }
419
420 /**
421 * Marks the cache as unblocked for the given cause. This also clears the
422 * blocked flags in the appropriate interfaces.
423 * @param cause The newly unblocked cause.
424 * @warning Calling this function can cause a blocked request on the bus to
425 * access the cache. The cache must be in a state to handle that request.
426 */
427 void clearBlocked(BlockedCause cause)
428 {
429 uint8_t flag = 1 << cause;
430 DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
431 cause, blocked);
432 if (blocked & flag)
433 {
434 blocked &= ~flag;
435 if (!isBlocked()) {
436 blocked_cycles[cause] += curTick - blockedCycle;
437 DPRINTF(Cache,"Unblocking from all causes\n");
438 cpuSidePort->clearBlocked();
439 }
440 }
441 if (blockedSnoop & flag)
442 {
443 blockedSnoop &= ~flag;
444 if (!isBlockedForSnoop()) {
445 memSidePort->clearBlocked();
446 }
447 }
448 }
449
450 /**
451 * True if the master bus should be requested.
452 * @return True if there are outstanding requests for the master bus.
453 */
454 bool doMasterRequest()
455 {
456 return masterRequests != 0;
457 }
458
459 /**
460 * Request the master bus for the given cause and time.
461 * @param cause The reason for the request.
462 * @param time The time to make the request.
463 */
464 void setMasterRequest(RequestCause cause, Tick time)
465 {
466 if (!doMasterRequest())
467 {
468 BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
469 reqCpu->schedule(time);
470 }
471 uint8_t flag = 1<<cause;
472 masterRequests |= flag;
473 }
474
475 /**
476 * Clear the master bus request for the given cause.
477 * @param cause The request reason to clear.
478 */
479 void clearMasterRequest(RequestCause cause)
480 {
481 uint8_t flag = 1<<cause;
482 masterRequests &= ~flag;
483 }
484
485 /**
486 * Return true if the slave bus should be requested.
487 * @return True if there are outstanding requests for the slave bus.
488 */
489 bool doSlaveRequest()
490 {
491 return slaveRequests != 0;
492 }
493
494 /**
495 * Request the slave bus for the given reason and time.
496 * @param cause The reason for the request.
497 * @param time The time to make the request.
498 */
499 void setSlaveRequest(RequestCause cause, Tick time)
500 {
501 uint8_t flag = 1<<cause;
502 slaveRequests |= flag;
503 assert("Implement\n" && 0);
504 // si->pktuest(time);
505 }
506
507 /**
508 * Clear the slave bus request for the given reason.
509 * @param cause The request reason to clear.
510 */
511 void clearSlaveRequest(RequestCause cause)
512 {
513 uint8_t flag = 1<<cause;
514 slaveRequests &= ~flag;
515 }
516
517 /**
518 * Send a response to the slave interface.
519 * @param pkt The request being responded to.
520 * @param time The time the response is ready.
521 */
522 void respond(Packet *pkt, Tick time)
523 {
524 CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
525 reqCpu->schedule(time);
526 }
527
528 /**
529 * Send a reponse to the slave interface and calculate miss latency.
530 * @param pkt The request to respond to.
531 * @param time The time the response is ready.
532 */
533 void respondToMiss(Packet *pkt, Tick time)
534 {
535 if (!pkt->req->isUncacheable()) {
536 missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
537 }
538 CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
539 reqCpu->schedule(time);
540 }
541
542 /**
543 * Suppliess the data if cache to cache transfers are enabled.
544 * @param pkt The bus transaction to fulfill.
545 */
546 void respondToSnoop(Packet *pkt, Tick time)
547 {
548 // assert("Implement\n" && 0);
549 // mi->respond(pkt,curTick + hitLatency);
550 CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
551 reqMem->schedule(time);
552 }
553
554 /**
555 * Notification from master interface that a address range changed. Nothing
556 * to do for a cache.
557 */
558 void rangeChange() {}
559
560 void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
561 {
562 if (isCpuSide)
563 {
564 AddrRangeList dummy;
565 memSidePort->getPeerAddressRanges(resp, dummy);
566 }
567 else
568 {
569 //This is where snoops get updated
570 AddrRangeList dummy;
571 if (!topLevelCache)
572 {
573 cpuSidePort->getPeerAddressRanges(dummy, snoop);
574 }
575 else
576 {
577 snoop.push_back(RangeSize(0,-1));
578 }
579
580 return;
581 }
582 }
583 };
584
585 #endif //__BASE_CACHE_HH__