[gem5.git] / src / mem / cache / base_cache.hh
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Declares a basic cache interface BaseCache.
 */

#ifndef __BASE_CACHE_HH__
#define __BASE_CACHE_HH__

#include <vector>
#include <string>
#include <list>
#include <inttypes.h>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "base/trace.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
#include "mem/port.hh"
#include "mem/request.hh"
#include "sim/eventq.hh"

/**
 * Reasons for caches to be blocked.
 */
enum BlockedCause{
    Blocked_NoMSHRs,
    Blocked_NoTargets,
    Blocked_NoWBBuffers,
    Blocked_Coherence,
    Blocked_Copy,
    NUM_BLOCKED_CAUSES
};

/**
 * Reasons for a cache to request a bus.
 */
enum RequestCause{
    Request_MSHR,
    Request_WB,
    Request_Coherence,
    Request_PF
};

/**
 * A basic cache interface. Implements some common functions for speed.
 */
class BaseCache : public MemObject
{
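    /**
     * A port on either the CPU side or the memory side of the cache;
     * which side is indicated by the isCpuSide flag.
     */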
    class CachePort : public Port
    {
      public:
        /** The cache that owns this port. */
        BaseCache *cache;

        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);

      protected:
        virtual bool recvTiming(Packet *pkt);

        virtual Tick recvAtomic(Packet *pkt);

        virtual void recvFunctional(Packet *pkt);

        virtual void recvStatusChange(Status status);

        virtual void getDeviceAddressRanges(AddrRangeList &resp,
                                            AddrRangeList &snoop);

        virtual int deviceBlockSize();

        virtual void recvRetry();

      public:
        /** Mark this port as blocked to new timing requests. */
        void setBlocked();

        /** Clear the blocked flag and send a retry if one is pending. */
        void clearBlocked();

        /** True if this port is currently refusing timing requests. */
        bool blocked;

        /** True if a retry must be sent once the port is unblocked. */
        bool mustSendRetry;

        /** True if this is the CPU-side port of the cache. */
        bool isCpuSide;

        /** Packets waiting to be sent out this port. */
        std::list<Packet *> drainList;
    };

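    /**
     * An event used to schedule a delayed action on a cache port: either
     * sending a queued packet (when constructed with a packet) or asking
     * the port to request the bus (when constructed without one).
     */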
    struct CacheEvent : public Event
    {
        CachePort *cachePort;
        Packet *pkt;

        CacheEvent(CachePort *_cachePort);
        CacheEvent(CachePort *_cachePort, Packet *_pkt);
        void process();
        const char *description();
    };

  protected:
    /** The port connected toward the CPU. */
    CachePort *cpuSidePort;
    /** The port connected toward memory. */
    CachePort *memSidePort;

    /** True if the snoop ranges have already been sent out the memory side. */
    bool snoopRangesSent;

  public:
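    /** Return the port (CPU side or memory side) matching the given name. */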
    virtual Port *getPort(const std::string &if_name, int idx = -1);

  private:
    // To be defined in cache_impl.hh not in base class
    virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

    virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide)
    {
        fatal("No implementation");
    }

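    /**
     * Propagate a status change (e.g. a range change) received on one side
     * of the cache to the other side.
     */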
    void recvStatusChange(Port::Status status, bool isCpuSide)
    {
        if (status == Port::RangeChange) {
            if (!isCpuSide) {
                cpuSidePort->sendStatusChange(Port::RangeChange);
                if (topLevelCache && !snoopRangesSent) {
                    snoopRangesSent = true;
                    memSidePort->sendStatusChange(Port::RangeChange);
                }
            }
            else {
                memSidePort->sendStatusChange(Port::RangeChange);
            }
        }
    }

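    /**
     * Get the next packet this cache wants to send on the memory-side bus;
     * implemented by the derived cache.
     */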
    virtual Packet *getPacket()
    {
        fatal("No implementation");
    }

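    /**
     * Get the next coherence packet this cache wants to send; implemented
     * by the derived cache.
     */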
    virtual Packet *getCoherencePacket()
    {
        fatal("No implementation");
    }

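    /**
     * Notify the cache of the result of sending a packet (e.g. one obtained
     * via getPacket()); implemented by the derived cache.
     */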
    virtual void sendResult(Packet* &pkt, bool success)
    {
        fatal("No implementation");
    }

    /**
     * Bit vector of the blocking reasons for the access path.
     * @sa #BlockedCause
     */
    uint8_t blocked;

    /**
     * Bit vector for the blocking reasons for the snoop path.
     * @sa #BlockedCause
     */
    uint8_t blockedSnoop;

    /**
     * Bit vector for the outstanding requests for the master interface.
     */
    uint8_t masterRequests;

    /**
     * Bit vector for the outstanding requests for the slave interface.
     */
    uint8_t slaveRequests;

  protected:

    /** True if this cache is connected to the CPU. */
    bool topLevelCache;


    /** Stores the tick at which the cache blocked, for statistics. */
    Tick blockedCycle;

    /** Block size of this cache. */
    const int blkSize;

    /** The number of misses to trigger an exit event. */
    Counter missCount;

  public:
    // Statistics
    /**
     * @addtogroup CacheStatistics
     * @{
     */

    /** Number of hits per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> hits[NUM_MEM_CMDS];
    /** Number of hits for demand accesses. */
    Stats::Formula demandHits;
    /** Number of hits for all accesses. */
    Stats::Formula overallHits;

    /** Number of misses per thread for each type of command. @sa Packet::Command */
    Stats::Vector<> misses[NUM_MEM_CMDS];
    /** Number of misses for demand accesses. */
    Stats::Formula demandMisses;
    /** Number of misses for all accesses. */
    Stats::Formula overallMisses;

    /**
     * Total number of cycles per thread/command spent waiting for a miss.
     * Used to calculate the average miss latency.
     */
    Stats::Vector<> missLatency[NUM_MEM_CMDS];
    /** Total number of cycles spent waiting for demand misses. */
    Stats::Formula demandMissLatency;
    /** Total number of cycles spent waiting for all misses. */
    Stats::Formula overallMissLatency;

    /** The number of accesses per command and thread. */
    Stats::Formula accesses[NUM_MEM_CMDS];
    /** The number of demand accesses. */
    Stats::Formula demandAccesses;
    /** The number of overall accesses. */
    Stats::Formula overallAccesses;

    /** The miss rate per command and thread. */
    Stats::Formula missRate[NUM_MEM_CMDS];
    /** The miss rate of all demand accesses. */
    Stats::Formula demandMissRate;
    /** The miss rate for all accesses. */
    Stats::Formula overallMissRate;

    /** The average miss latency per command and thread. */
    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
    /** The average miss latency for demand misses. */
    Stats::Formula demandAvgMissLatency;
    /** The average miss latency for all misses. */
    Stats::Formula overallAvgMissLatency;

    /** The total number of cycles blocked for each blocked cause. */
    Stats::Vector<> blocked_cycles;
    /** The number of times this cache blocked for each blocked cause. */
    Stats::Vector<> blocked_causes;

    /** The average number of cycles blocked for each blocked cause. */
    Stats::Formula avg_blocked;

    /** The number of fast writes (WH64) performed. */
    Stats::Scalar<> fastWrites;

    /** The number of cache copies performed. */
    Stats::Scalar<> cacheCopies;

    /**
     * @}
     */

    /**
     * Register stats for this object.
     */
    virtual void regStats();

  public:

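    /** Parameters used to construct and configure a BaseCache. */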
    class Params
    {
      public:
        /** List of address ranges of this cache. */
        std::vector<Range<Addr> > addrRange;
        /** The hit latency for this cache. */
        int hitLatency;
        /** The block size of this cache. */
        int blkSize;
        /**
         * The maximum number of misses this cache should handle before
         * ending the simulation.
         */
        Counter maxMisses;

        /**
         * Construct an instance of this parameter class.
         */
        Params(std::vector<Range<Addr> > addr_range,
               int hit_latency, int _blkSize, Counter max_misses)
            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
              maxMisses(max_misses)
        {
        }
    };

    /**
     * Create and initialize a basic cache object.
     * @param name The name of this cache.
     * @param params The parameter object for this BaseCache.
     */
    BaseCache(const std::string &name, Params &params)
        : MemObject(name), blocked(0), blockedSnoop(0), masterRequests(0),
          slaveRequests(0), topLevelCache(false), blkSize(params.blkSize),
          missCount(params.maxMisses)
    {
        // Start ports at NULL; if more than one of each is created we
        // should panic.
        cpuSidePort = NULL;
        memSidePort = NULL;
        snoopRangesSent = false;
    }
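
    // Illustrative sketch (not part of the original interface docs): a
    // derived cache is expected to build a Params object and hand it to this
    // constructor; the concrete values below are hypothetical.
    //
    //     std::vector<Range<Addr> > ranges;
    //     ranges.push_back(RangeSize(0, -1));        // cover all of memory
    //     BaseCache::Params params(ranges, 1 /* hit latency */,
    //                              64 /* block size */, 0 /* max misses */);
    //     // ... then construct: BaseCache(name, params)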

    /**
     * Called after all ports are connected; performs any final
     * initialization for this cache.
     */
    virtual void init();

    /**
     * Query block size of a cache.
     * @return The block size.
     */
    int getBlockSize() const
    {
        return blkSize;
    }

    /**
     * Returns true if this cache is connected to the CPU.
     * @return True if this is an L1 cache.
     */
    bool isTopLevel()
    {
        return topLevelCache;
    }

    /**
     * Returns true if the cache is blocked for accesses.
     */
    bool isBlocked()
    {
        return blocked != 0;
    }

    /**
     * Returns true if the cache is blocked for snoops.
     */
    bool isBlockedForSnoop()
    {
        return blockedSnoop != 0;
    }

    /**
     * Marks the access path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the slave interface.
     * @param cause The reason for the cache blocking.
     */
    void setBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (blocked == 0) {
            blocked_causes[cause]++;
            blockedCycle = curTick;
        }
        if (!(blocked & flag)) {
            // Wasn't already blocked for this cause
            blocked |= flag;
            DPRINTF(Cache,"Blocking for cause %s\n", cause);
            cpuSidePort->setBlocked();
        }
    }

    /**
     * Marks the snoop path of the cache as blocked for the given cause. This
     * also sets the blocked flag in the master interface.
     * @param cause The reason to block the snoop path.
     */
    void setBlockedForSnoop(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        if (!(blocked & flag)) {
            // Wasn't already blocked for this cause
            blockedSnoop |= flag;
            memSidePort->setBlocked();
        }
    }

    /**
     * Marks the cache as unblocked for the given cause. This also clears the
     * blocked flags in the appropriate interfaces.
     * @param cause The newly unblocked cause.
     * @warning Calling this function can cause a blocked request on the bus to
     * access the cache. The cache must be in a state to handle that request.
     */
    void clearBlocked(BlockedCause cause)
    {
        uint8_t flag = 1 << cause;
        DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
                cause, blocked);
        if (blocked & flag)
        {
            blocked &= ~flag;
            if (!isBlocked()) {
                blocked_cycles[cause] += curTick - blockedCycle;
                DPRINTF(Cache,"Unblocking from all causes\n");
                cpuSidePort->clearBlocked();
            }
        }
        if (blockedSnoop & flag)
        {
            blockedSnoop &= ~flag;
            if (!isBlockedForSnoop()) {
                memSidePort->clearBlocked();
            }
        }
    }

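    // Worked example (illustrative only): blocked is a bit vector indexed by
    // BlockedCause, so the CPU-side port stays blocked until every cause has
    // been cleared:
    //
    //     setBlocked(Blocked_NoMSHRs);     // blocked == 0x1, port blocked
    //     setBlocked(Blocked_NoTargets);   // blocked == 0x3
    //     clearBlocked(Blocked_NoMSHRs);   // blocked == 0x2, still blocked
    //     clearBlocked(Blocked_NoTargets); // blocked == 0x0, port unblocked
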
    /**
     * True if the master bus should be requested.
     * @return True if there are outstanding requests for the master bus.
     */
    bool doMasterRequest()
    {
        return masterRequests != 0;
    }

    /**
     * Request the master bus for the given cause and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setMasterRequest(RequestCause cause, Tick time)
    {
        if (!doMasterRequest())
        {
            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(memSidePort);
            reqCpu->schedule(time);
        }
        uint8_t flag = 1<<cause;
        masterRequests |= flag;
    }

    /**
     * Clear the master bus request for the given cause.
     * @param cause The request reason to clear.
     */
    void clearMasterRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        masterRequests &= ~flag;
    }

    /**
     * Return true if the slave bus should be requested.
     * @return True if there are outstanding requests for the slave bus.
     */
    bool doSlaveRequest()
    {
        return slaveRequests != 0;
    }

    /**
     * Request the slave bus for the given reason and time.
     * @param cause The reason for the request.
     * @param time The time to make the request.
     */
    void setSlaveRequest(RequestCause cause, Tick time)
    {
        uint8_t flag = 1<<cause;
        slaveRequests |= flag;
        assert("Implement\n" && 0);
        // si->pktuest(time);
    }

    /**
     * Clear the slave bus request for the given reason.
     * @param cause The request reason to clear.
     */
    void clearSlaveRequest(RequestCause cause)
    {
        uint8_t flag = 1<<cause;
        slaveRequests &= ~flag;
    }

    /**
     * Send a response to the slave interface.
     * @param pkt The request being responded to.
     * @param time The time the response is ready.
     */
    void respond(Packet *pkt, Tick time)
    {
        if (pkt->needsResponse()) {
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
        }
    }

    /**
     * Send a response to the slave interface and calculate miss latency.
     * @param pkt The request to respond to.
     * @param time The time the response is ready.
     */
    void respondToMiss(Packet *pkt, Tick time)
    {
        if (!pkt->req->isUncacheable()) {
            missLatency[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] += time - pkt->time;
        }
        if (pkt->needsResponse()) {
            CacheEvent *reqCpu = new CacheEvent(cpuSidePort, pkt);
            reqCpu->schedule(time);
        }
    }

    /**
     * Supplies the data if cache-to-cache transfers are enabled.
     * @param pkt The bus transaction to fulfill.
     * @param time The time the response is ready.
     */
    void respondToSnoop(Packet *pkt, Tick time)
    {
        // assert("Implement\n" && 0);
        // mi->respond(pkt,curTick + hitLatency);
        assert (pkt->needsResponse());
        CacheEvent *reqMem = new CacheEvent(memSidePort, pkt);
        reqMem->schedule(time);
    }

    /**
     * Notification from master interface that an address range changed.
     * Nothing to do for a cache.
     */
    void rangeChange() {}

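    /**
     * Collect the address ranges this cache responds to and the ranges it
     * should snoop, for the port on the given side.
     * @param resp List to fill with the responding address ranges.
     * @param snoop List to fill with the address ranges to snoop.
     * @param isCpuSide True if the query came from the CPU-side port.
     */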
    void getAddressRanges(AddrRangeList &resp, AddrRangeList &snoop, bool isCpuSide)
    {
        if (isCpuSide)
        {
            AddrRangeList dummy;
            memSidePort->getPeerAddressRanges(resp, dummy);
        }
        else
        {
            // This is where snoops get updated
            AddrRangeList dummy;
            if (!topLevelCache)
            {
                cpuSidePort->getPeerAddressRanges(dummy, snoop);
            }
            else
            {
                snoop.push_back(RangeSize(0,-1));
            }

            return;
        }
    }
};

#endif //__BASE_CACHE_HH__