Fix CSHR retries
[gem5.git] / src / mem / cache / base_cache.hh
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"

/**
 * Reasons for Caches to be Blocked.
 */
enum BlockedCause{
    Blocked_NoMSHRs,
    Blocked_NoTargets,
    Blocked_NoWBBuffers,
    Blocked_Coherence,
    Blocked_Copy,
    NUM_BLOCKED_CAUSES
};

/**
 * Reasons for cache to request a bus.
 */
enum RequestCause{
    Request_MSHR,
    Request_WB,
    Request_Coherence,
    Request_PF
};
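
// Each cause value above doubles as a bit position: the 'blocked',
// 'blockedSnoop', 'masterRequests', and 'slaveRequests' members of BaseCache
// below are bit vectors holding (1 << cause) for every cause currently
// active. For example (illustrative only), a cache out of both MSHRs and
// writeback buffers would have
//     blocked == (1 << Blocked_NoMSHRs) | (1 << Blocked_NoWBBuffers)
// and remains blocked until each of those causes is cleared individually.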

class MSHR;
/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
    class CachePort : public Port
    {
      public:
        BaseCache *cache;

        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);

      protected:
        virtual bool recvTiming(Packet *pkt);

        virtual Tick recvAtomic(Packet *pkt);

        virtual void recvFunctional(Packet *pkt);

        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop);

        virtual int deviceBlockSize();

        virtual void recvRetry();

      public:
        void setBlocked();

        void clearBlocked();

        /** True if this port's cache is blocked for this path and incoming
         *  timing requests are being refused. */
        bool blocked;

        /** True if a request was rejected while blocked, so a retry is owed
         *  once the port unblocks. */
        bool mustSendRetry;

        /** True if this is the CPU-side (slave) port of the cache. */
        bool isCpuSide;

        /** True if this port is waiting for a retry from its peer before it
         *  can send again. */
        bool waitingOnRetry;

        /** Packets waiting to be sent once the port can make progress. */
        std::list<Packet *> drainList;

        /** Coherence-path (CSHR) packet that must be resent when a retry is
         *  received. */
        Packet *cshrRetry;
    };

    struct CacheEvent : public Event
    {
        CachePort *cachePort;
        Packet *pkt;

        CacheEvent(CachePort *_cachePort);
        CacheEvent(CachePort *_cachePort, Packet *_pkt);
        void process();
        const char *description();
    };

  protected:
    CachePort *cpuSidePort;
    CachePort *memSidePort;

    bool snoopRangesSent;

  public:
    virtual Port *getPort(const std::string &if_name, int idx = -1);

  private:
    // To be defined in cache_impl.hh, not in the base class
    virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange){
            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
                if (!snoopRangesSent) {
                    snoopRangesSent = true;
                    memSidePort->sendStatusChange(Port::RangeChange);
                }
            }
            else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

    virtual Packet *getPacket()
    {
        fatal("No implementation");
    }

    virtual Packet *getCoherencePacket()
    {
        fatal("No implementation");
    }

    virtual void sendResult(Packet* &pkt, MSHR* mshr, bool success)
    {
        fatal("No implementation");
    }

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector for the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector for the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector for the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;

  protected:

    /** Stores time the cache blocked for statistics. */
    Tick blockedCycle;

    /** Block size of this cache */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
              maxMisses(max_misses)
        {
        }
    };

    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), blkSize(params.blkSize),
          missCount(params.maxMisses)
    {
        // Start ports as NULL; if more than one of either is created we should panic
        cpuSidePort = NULL;
        memSidePort = NULL;
        snoopRangesSent = false;
    }

    virtual void init();

    /**
     * Query block size of a cache.
     * @return The block size
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        int old_state = blocked;
        if (!(blocked & flag)) {
            // Wasn't already blocked for this cause
            blocked |= flag;
            DPRINTF(Cache,"Blocking for cause %s\n", cause);
            if (!old_state)
                cpuSidePort->setBlocked();
        }
    }

    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        uint8_t old_state = blockedSnoop;
        if (!(blockedSnoop & flag)) {
            // Wasn't already blocked for this cause
            blockedSnoop |= flag;
            if (!old_state)
                memSidePort->setBlocked();
        }
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag)
        {
            blocked &= ~flag;
            if (!isBlocked()) {
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache,"Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag)
        {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }
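
    // Illustrative sequence (a sketch, not mandated by this interface): a
    // cache that runs out of MSHRs would typically call
    //     setBlocked(Blocked_NoMSHRs);    // CPU-side port stops accepting requests
    // and, once an MSHR frees up,
    //     clearBlocked(Blocked_NoMSHRs);  // CPU-side port may retry a blocked requester
    // The access path stays blocked until every cause bit in 'blocked' is cleared.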

    /**
     * True if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest() && !memSidePort->waitingOnRetry)
        {
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1<<cause;
        masterRequests |= flag;
    }
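
    // Illustrative use (a sketch, not prescribed by this interface): a miss
    // handler that has just queued a packet in an MSHR might call
    //     setMasterRequest(Request_MSHR, curTick);
    // and later clearMasterRequest(Request_MSHR) once no MSHR packets remain
    // to be sent.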

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        masterRequests &= ~flag;
    }

    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        uint8_t flag = 1<<cause;
        slaveRequests |= flag;
        assert("Implement\n" && 0);
        // si->pktuest(time);
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        slaveRequests &= ~flag;
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(Packet *pkt, Tick time)
    {
        if (pkt->needsResponse()) {
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
        }
        else {
            if (pkt->cmd == Packet::Writeback) delete pkt->req;
            delete pkt;
        }
    }

    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(Packet *pkt, Tick time)
    {
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
        }
        if (pkt->needsResponse()) {
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
        }
        else {
            if (pkt->cmd == Packet::Writeback) delete pkt->req;
            delete pkt;
        }
    }

    /**
     * Supplies the data if cache to cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     * @param time The time the response is ready.
     */
    void respondToSnoop(Packet *pkt, Tick time)
    {
        assert (pkt->needsResponse());
        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
        reqMem->schedule(time);
    }

    /**
     * Notification from master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

    void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
    {
        if (isCpuSide)
        {
            AddrRangeList dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        }
        else
        {
            // This is where snoops get updated
            AddrRangeList dummy;
            cpuSidePort->getPeerAddressRanges(dummy, snoop);
            return;
        }
    }
};

#endif //__BASE_CACHE_HH__