mem-ruby: Cleanup replacement_data usage
[gem5.git] / src / mem / ruby / structures / CacheMemory.cc
1 /*
2 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
3 * Copyright (c) 2013 Advanced Micro Devices, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 #include "mem/ruby/structures/CacheMemory.hh"
31
32 #include "base/intmath.hh"
33 #include "base/logging.hh"
34 #include "debug/RubyCache.hh"
35 #include "debug/RubyCacheTrace.hh"
36 #include "debug/RubyResourceStalls.hh"
37 #include "debug/RubyStats.hh"
38 #include "mem/cache/replacement_policies/weighted_lru_rp.hh"
39 #include "mem/ruby/protocol/AccessPermission.hh"
40 #include "mem/ruby/system/RubySystem.hh"
41
42 using namespace std;
43
44 ostream&
45 operator<<(ostream& out, const CacheMemory& obj)
46 {
47 obj.print(out);
48 out << flush;
49 return out;
50 }
51
// Factory hook invoked by the generated parameter struct: builds the
// C++ CacheMemory SimObject for a RubyCache Python configuration.
CacheMemory *
RubyCacheParams::create()
{
    return new CacheMemory(this);
}
57
58 CacheMemory::CacheMemory(const Params *p)
59 : SimObject(p),
60 dataArray(p->dataArrayBanks, p->dataAccessLatency,
61 p->start_index_bit, p->ruby_system),
62 tagArray(p->tagArrayBanks, p->tagAccessLatency,
63 p->start_index_bit, p->ruby_system)
64 {
65 m_cache_size = p->size;
66 m_cache_assoc = p->assoc;
67 m_replacementPolicy_ptr = p->replacement_policy;
68 m_start_index_bit = p->start_index_bit;
69 m_is_instruction_only_cache = p->is_icache;
70 m_resource_stalls = p->resourceStalls;
71 m_block_size = p->block_size; // may be 0 at this point. Updated in init()
72 m_use_occupancy = dynamic_cast<WeightedLRUPolicy*>(
73 m_replacementPolicy_ptr) ? true : false;
74 }
75
76 void
77 CacheMemory::init()
78 {
79 if (m_block_size == 0) {
80 m_block_size = RubySystem::getBlockSizeBytes();
81 }
82 m_cache_num_sets = (m_cache_size / m_cache_assoc) / m_block_size;
83 assert(m_cache_num_sets > 1);
84 m_cache_num_set_bits = floorLog2(m_cache_num_sets);
85 assert(m_cache_num_set_bits > 0);
86
87 m_cache.resize(m_cache_num_sets,
88 std::vector<AbstractCacheEntry*>(m_cache_assoc, nullptr));
89 replacement_data.resize(m_cache_num_sets,
90 std::vector<ReplData>(m_cache_assoc, nullptr));
91 // instantiate all the replacement_data here
92 for (int i = 0; i < m_cache_num_sets; i++) {
93 for ( int j = 0; j < m_cache_assoc; j++) {
94 replacement_data[i][j] =
95 m_replacementPolicy_ptr->instantiateEntry();
96 }
97 }
98 }
99
100 CacheMemory::~CacheMemory()
101 {
102 if (m_replacementPolicy_ptr)
103 delete m_replacementPolicy_ptr;
104 for (int i = 0; i < m_cache_num_sets; i++) {
105 for (int j = 0; j < m_cache_assoc; j++) {
106 delete m_cache[i][j];
107 }
108 }
109 }
110
// Convert a (line-aligned) address to its set index by extracting
// m_cache_num_set_bits bits starting at m_start_index_bit.
int64_t
CacheMemory::addressToCacheSet(Addr address) const
{
    assert(address == makeLineAddress(address));
    return bitSelect(address, m_start_index_bit,
                     m_start_index_bit + m_cache_num_set_bits - 1);
}
119
120 // Given a cache index: returns the index of the tag in a set.
121 // returns -1 if the tag is not found.
122 int
123 CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
124 {
125 assert(tag == makeLineAddress(tag));
126 // search the set for the tags
127 auto it = m_tag_index.find(tag);
128 if (it != m_tag_index.end())
129 if (m_cache[cacheSet][it->second]->m_Permission !=
130 AccessPermission_NotPresent)
131 return it->second;
132 return -1; // Not found
133 }
134
135 // Given a cache index: returns the index of the tag in a set.
136 // returns -1 if the tag is not found.
137 int
138 CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
139 Addr tag) const
140 {
141 assert(tag == makeLineAddress(tag));
142 // search the set for the tags
143 auto it = m_tag_index.find(tag);
144 if (it != m_tag_index.end())
145 return it->second;
146 return -1; // Not found
147 }
148
149 // Given an unique cache block identifier (idx): return the valid address
150 // stored by the cache block. If the block is invalid/notpresent, the
151 // function returns the 0 address
152 Addr
153 CacheMemory::getAddressAtIdx(int idx) const
154 {
155 Addr tmp(0);
156
157 int set = idx / m_cache_assoc;
158 assert(set < m_cache_num_sets);
159
160 int way = idx - set * m_cache_assoc;
161 assert (way < m_cache_assoc);
162
163 AbstractCacheEntry* entry = m_cache[set][way];
164 if (entry == NULL ||
165 entry->m_Permission == AccessPermission_Invalid ||
166 entry->m_Permission == AccessPermission_NotPresent) {
167 return tmp;
168 }
169 return entry->m_Address;
170 }
171
172 bool
173 CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
174 DataBlock*& data_ptr)
175 {
176 assert(address == makeLineAddress(address));
177 DPRINTF(RubyCache, "address: %#x\n", address);
178 int64_t cacheSet = addressToCacheSet(address);
179 int loc = findTagInSet(cacheSet, address);
180 if (loc != -1) {
181 // Do we even have a tag match?
182 AbstractCacheEntry* entry = m_cache[cacheSet][loc];
183 m_replacementPolicy_ptr->touch(entry->replacementData);
184 m_cache[cacheSet][loc]->setLastAccess(curTick());
185 data_ptr = &(entry->getDataBlk());
186
187 if (entry->m_Permission == AccessPermission_Read_Write) {
188 return true;
189 }
190 if ((entry->m_Permission == AccessPermission_Read_Only) &&
191 (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
192 return true;
193 }
194 // The line must not be accessible
195 }
196 data_ptr = NULL;
197 return false;
198 }
199
200 bool
201 CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
202 DataBlock*& data_ptr)
203 {
204 assert(address == makeLineAddress(address));
205 DPRINTF(RubyCache, "address: %#x\n", address);
206 int64_t cacheSet = addressToCacheSet(address);
207 int loc = findTagInSet(cacheSet, address);
208
209 if (loc != -1) {
210 // Do we even have a tag match?
211 AbstractCacheEntry* entry = m_cache[cacheSet][loc];
212 m_replacementPolicy_ptr->touch(entry->replacementData);
213 m_cache[cacheSet][loc]->setLastAccess(curTick());
214 data_ptr = &(entry->getDataBlk());
215
216 return m_cache[cacheSet][loc]->m_Permission !=
217 AccessPermission_NotPresent;
218 }
219
220 data_ptr = NULL;
221 return false;
222 }
223
224 // tests to see if an address is present in the cache
225 bool
226 CacheMemory::isTagPresent(Addr address) const
227 {
228 assert(address == makeLineAddress(address));
229 int64_t cacheSet = addressToCacheSet(address);
230 int loc = findTagInSet(cacheSet, address);
231
232 if (loc == -1) {
233 // We didn't find the tag
234 DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
235 return false;
236 }
237 DPRINTF(RubyCache, "address: %#x found\n", address);
238 return true;
239 }
240
241 // Returns true if there is:
242 // a) a tag match on this address or there is
243 // b) an unused line in the same cache "way"
244 bool
245 CacheMemory::cacheAvail(Addr address) const
246 {
247 assert(address == makeLineAddress(address));
248
249 int64_t cacheSet = addressToCacheSet(address);
250
251 for (int i = 0; i < m_cache_assoc; i++) {
252 AbstractCacheEntry* entry = m_cache[cacheSet][i];
253 if (entry != NULL) {
254 if (entry->m_Address == address ||
255 entry->m_Permission == AccessPermission_NotPresent) {
256 // Already in the cache or we found an empty entry
257 return true;
258 }
259 } else {
260 return true;
261 }
262 }
263 return false;
264 }
265
// Install 'entry' for 'address' in the first free (or NotPresent) way of
// the address's set. Preconditions: the address is line-aligned, not
// already present, and cacheAvail() holds — so the loop must find a slot,
// otherwise we panic. Returns the installed entry.
AbstractCacheEntry*
CacheMemory::allocate(Addr address, AbstractCacheEntry *entry)
{
    assert(address == makeLineAddress(address));
    assert(!isTagPresent(address));
    assert(cacheAvail(address));
    DPRINTF(RubyCache, "address: %#x\n", address);

    // Find the first open slot
    int64_t cacheSet = addressToCacheSet(address);
    std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
    for (int i = 0; i < m_cache_assoc; i++) {
        if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
            // Reusing a NotPresent slot that holds a *different* entry
            // object means the old entry is leaked unless the protocol
            // tracks it elsewhere — warn once rather than crash.
            if (set[i] && (set[i] != entry)) {
                warn_once("This protocol contains a cache entry handling bug: "
                    "Entries in the cache should never be NotPresent! If\n"
                    "this entry (%#x) is not tracked elsewhere, it will memory "
                    "leak here. Fix your protocol to eliminate these!",
                    address);
            }
            set[i] = entry; // Init entry
            set[i]->m_Address = address;
            set[i]->m_Permission = AccessPermission_Invalid;
            DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
                    address);
            set[i]->m_locked = -1;
            // Keep the reverse index and the entry's own notion of its
            // position in sync with the slot we just filled.
            m_tag_index[address] = i;
            set[i]->setPosition(cacheSet, i);
            // Hand the entry the pre-instantiated per-slot metadata
            // created in init().
            set[i]->replacementData = replacement_data[cacheSet][i];
            set[i]->setLastAccess(curTick());

            // Call reset function here to set initial value for different
            // replacement policies.
            m_replacementPolicy_ptr->reset(entry->replacementData);

            return entry;
        }
    }
    panic("Allocate didn't find an available entry");
}
306
307 void
308 CacheMemory::deallocate(Addr address)
309 {
310 DPRINTF(RubyCache, "address: %#x\n", address);
311 AbstractCacheEntry* entry = lookup(address);
312 assert(entry != nullptr);
313 m_replacementPolicy_ptr->invalidate(entry->replacementData);
314 uint32_t cache_set = entry->getSet();
315 uint32_t way = entry->getWay();
316 delete entry;
317 m_cache[cache_set][way] = NULL;
318 m_tag_index.erase(address);
319 }
320
321 // Returns with the physical address of the conflicting cache line
322 Addr
323 CacheMemory::cacheProbe(Addr address) const
324 {
325 assert(address == makeLineAddress(address));
326 assert(!cacheAvail(address));
327
328 int64_t cacheSet = addressToCacheSet(address);
329 std::vector<ReplaceableEntry*> candidates;
330 for (int i = 0; i < m_cache_assoc; i++) {
331 candidates.push_back(static_cast<ReplaceableEntry*>(
332 m_cache[cacheSet][i]));
333 }
334 return m_cache[cacheSet][m_replacementPolicy_ptr->
335 getVictim(candidates)->getWay()]->m_Address;
336 }
337
338 // looks an address up in the cache
339 AbstractCacheEntry*
340 CacheMemory::lookup(Addr address)
341 {
342 assert(address == makeLineAddress(address));
343 int64_t cacheSet = addressToCacheSet(address);
344 int loc = findTagInSet(cacheSet, address);
345 if (loc == -1) return NULL;
346 return m_cache[cacheSet][loc];
347 }
348
349 // looks an address up in the cache
350 const AbstractCacheEntry*
351 CacheMemory::lookup(Addr address) const
352 {
353 assert(address == makeLineAddress(address));
354 int64_t cacheSet = addressToCacheSet(address);
355 int loc = findTagInSet(cacheSet, address);
356 if (loc == -1) return NULL;
357 return m_cache[cacheSet][loc];
358 }
359
360 // Sets the most recently used bit for a cache block
361 void
362 CacheMemory::setMRU(Addr address)
363 {
364 AbstractCacheEntry* entry = lookup(makeLineAddress(address));
365 if (entry != nullptr) {
366 m_replacementPolicy_ptr->touch(entry->replacementData);
367 entry->setLastAccess(curTick());
368 }
369 }
370
// Promote a known-resident entry to most-recently-used: touch its
// replacement state and refresh the last-access tick.
void
CacheMemory::setMRU(AbstractCacheEntry *entry)
{
    assert(entry != nullptr);
    m_replacementPolicy_ptr->touch(entry->replacementData);
    entry->setLastAccess(curTick());
}
378
379 void
380 CacheMemory::setMRU(Addr address, int occupancy)
381 {
382 AbstractCacheEntry* entry = lookup(makeLineAddress(address));
383 if (entry != nullptr) {
384 // m_use_occupancy can decide whether we are using WeightedLRU
385 // replacement policy. Depending on different replacement policies,
386 // use different touch() function.
387 if (m_use_occupancy) {
388 static_cast<WeightedLRUPolicy*>(m_replacementPolicy_ptr)->touch(
389 entry->replacementData, occupancy);
390 } else {
391 m_replacementPolicy_ptr->touch(entry->replacementData);
392 }
393 entry->setLastAccess(curTick());
394 }
395 }
396
397 int
398 CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
399 {
400 assert(set < m_cache_num_sets);
401 assert(loc < m_cache_assoc);
402 int ret = 0;
403 if (m_cache[set][loc] != NULL) {
404 ret = m_cache[set][loc]->getNumValidBlocks();
405 assert(ret >= 0);
406 }
407
408 return ret;
409 }
410
411 void
412 CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
413 {
414 uint64_t warmedUpBlocks = 0;
415 uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
416 (uint64_t)m_cache_assoc;
417
418 for (int i = 0; i < m_cache_num_sets; i++) {
419 for (int j = 0; j < m_cache_assoc; j++) {
420 if (m_cache[i][j] != NULL) {
421 AccessPermission perm = m_cache[i][j]->m_Permission;
422 RubyRequestType request_type = RubyRequestType_NULL;
423 if (perm == AccessPermission_Read_Only) {
424 if (m_is_instruction_only_cache) {
425 request_type = RubyRequestType_IFETCH;
426 } else {
427 request_type = RubyRequestType_LD;
428 }
429 } else if (perm == AccessPermission_Read_Write) {
430 request_type = RubyRequestType_ST;
431 }
432
433 if (request_type != RubyRequestType_NULL) {
434 Tick lastAccessTick;
435 lastAccessTick = m_cache[i][j]->getLastAccess();
436 tr->addRecord(cntrl, m_cache[i][j]->m_Address,
437 0, request_type, lastAccessTick,
438 m_cache[i][j]->getDataBlk());
439 warmedUpBlocks++;
440 }
441 }
442 }
443 }
444
445 DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
446 "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
447 totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
448 }
449
450 void
451 CacheMemory::print(ostream& out) const
452 {
453 out << "Cache dump: " << name() << endl;
454 for (int i = 0; i < m_cache_num_sets; i++) {
455 for (int j = 0; j < m_cache_assoc; j++) {
456 if (m_cache[i][j] != NULL) {
457 out << " Index: " << i
458 << " way: " << j
459 << " entry: " << *m_cache[i][j] << endl;
460 } else {
461 out << " Index: " << i
462 << " way: " << j
463 << " entry: NULL" << endl;
464 }
465 }
466 }
467 }
468
// Data-dump hook required by the interface; intentionally a stub —
// this cache model does not support dumping raw data contents.
void
CacheMemory::printData(ostream& out) const
{
    out << "printData() not supported" << endl;
}
474
475 void
476 CacheMemory::setLocked(Addr address, int context)
477 {
478 DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
479 assert(address == makeLineAddress(address));
480 int64_t cacheSet = addressToCacheSet(address);
481 int loc = findTagInSet(cacheSet, address);
482 assert(loc != -1);
483 m_cache[cacheSet][loc]->setLocked(context);
484 }
485
486 void
487 CacheMemory::clearLocked(Addr address)
488 {
489 DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
490 assert(address == makeLineAddress(address));
491 int64_t cacheSet = addressToCacheSet(address);
492 int loc = findTagInSet(cacheSet, address);
493 assert(loc != -1);
494 m_cache[cacheSet][loc]->clearLocked();
495 }
496
497 bool
498 CacheMemory::isLocked(Addr address, int context)
499 {
500 assert(address == makeLineAddress(address));
501 int64_t cacheSet = addressToCacheSet(address);
502 int loc = findTagInSet(cacheSet, address);
503 assert(loc != -1);
504 DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
505 address, m_cache[cacheSet][loc]->m_locked, context);
506 return m_cache[cacheSet][loc]->isLocked(context);
507 }
508
// Register all statistics for this cache with the gem5 stats framework.
// Formula stats (demand_accesses, total_prefetches) are wired up here so
// they track their component counters automatically.
void
CacheMemory::regStats()
{
    SimObject::regStats();

    m_demand_hits
        .name(name() + ".demand_hits")
        .desc("Number of cache demand hits")
        ;

    m_demand_misses
        .name(name() + ".demand_misses")
        .desc("Number of cache demand misses")
        ;

    m_demand_accesses
        .name(name() + ".demand_accesses")
        .desc("Number of cache demand accesses")
        ;

    // Formula: accesses is always hits + misses.
    m_demand_accesses = m_demand_hits + m_demand_misses;

    m_sw_prefetches
        .name(name() + ".total_sw_prefetches")
        .desc("Number of software prefetches")
        .flags(Stats::nozero)
        ;

    m_hw_prefetches
        .name(name() + ".total_hw_prefetches")
        .desc("Number of hardware prefetches")
        .flags(Stats::nozero)
        ;

    m_prefetches
        .name(name() + ".total_prefetches")
        .desc("Number of prefetches")
        .flags(Stats::nozero)
        ;

    // Formula: total prefetches = software + hardware.
    m_prefetches = m_sw_prefetches + m_hw_prefetches;

    // NOTE(review): the vector is sized with RubyRequestType_NUM but the
    // subname loop below iterates RubyAccessMode_NUM — confirm the two
    // enums' sizes are compatible (out-of-range subname otherwise).
    m_accessModeType
        .init(RubyRequestType_NUM)
        .name(name() + ".access_mode")
        .flags(Stats::pdf | Stats::total)
        ;
    for (int i = 0; i < RubyAccessMode_NUM; i++) {
        m_accessModeType
            .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
            .flags(Stats::nozero)
            ;
    }

    numDataArrayReads
        .name(name() + ".num_data_array_reads")
        .desc("number of data array reads")
        .flags(Stats::nozero)
        ;

    numDataArrayWrites
        .name(name() + ".num_data_array_writes")
        .desc("number of data array writes")
        .flags(Stats::nozero)
        ;

    numTagArrayReads
        .name(name() + ".num_tag_array_reads")
        .desc("number of tag array reads")
        .flags(Stats::nozero)
        ;

    numTagArrayWrites
        .name(name() + ".num_tag_array_writes")
        .desc("number of tag array writes")
        .flags(Stats::nozero)
        ;

    numTagArrayStalls
        .name(name() + ".num_tag_array_stalls")
        .desc("number of stalls caused by tag array")
        .flags(Stats::nozero)
        ;

    numDataArrayStalls
        .name(name() + ".num_data_array_stalls")
        .desc("number of stalls caused by data array")
        .flags(Stats::nozero)
        ;
}
599
600 // assumption: SLICC generated files will only call this function
601 // once **all** resources are granted
602 void
603 CacheMemory::recordRequestType(CacheRequestType requestType, Addr addr)
604 {
605 DPRINTF(RubyStats, "Recorded statistic: %s\n",
606 CacheRequestType_to_string(requestType));
607 switch(requestType) {
608 case CacheRequestType_DataArrayRead:
609 if (m_resource_stalls)
610 dataArray.reserve(addressToCacheSet(addr));
611 numDataArrayReads++;
612 return;
613 case CacheRequestType_DataArrayWrite:
614 if (m_resource_stalls)
615 dataArray.reserve(addressToCacheSet(addr));
616 numDataArrayWrites++;
617 return;
618 case CacheRequestType_TagArrayRead:
619 if (m_resource_stalls)
620 tagArray.reserve(addressToCacheSet(addr));
621 numTagArrayReads++;
622 return;
623 case CacheRequestType_TagArrayWrite:
624 if (m_resource_stalls)
625 tagArray.reserve(addressToCacheSet(addr));
626 numTagArrayWrites++;
627 return;
628 default:
629 warn("CacheMemory access_type not found: %s",
630 CacheRequestType_to_string(requestType));
631 }
632 }
633
634 bool
635 CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
636 {
637 if (!m_resource_stalls) {
638 return true;
639 }
640
641 if (res == CacheResourceType_TagArray) {
642 if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
643 else {
644 DPRINTF(RubyResourceStalls,
645 "Tag array stall on addr %#x in set %d\n",
646 addr, addressToCacheSet(addr));
647 numTagArrayStalls++;
648 return false;
649 }
650 } else if (res == CacheResourceType_DataArray) {
651 if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
652 else {
653 DPRINTF(RubyResourceStalls,
654 "Data array stall on addr %#x in set %d\n",
655 addr, addressToCacheSet(addr));
656 numDataArrayStalls++;
657 return false;
658 }
659 } else {
660 panic("Unrecognized cache resource type.");
661 }
662 }
663
// True when the slot at (cache_set, loc) holds an entry in the Invalid
// permission state. Assumes the slot is occupied (no NULL check).
bool
CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
}
669
// True when the slot at (cache_set, loc) is in any state other than
// Busy. Assumes the slot is occupied (no NULL check).
bool
CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
}