mem-ruby: Update stats style
src/mem/ruby/structures/CacheMemory.cc
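For reference, the "new style" this change moves the stats toward: counters
live in a Stats::Group subclass and are registered with ADD_STAT in the
group's constructor initializer list, with flags set in the body. A minimal
sketch of the pattern (ExampleStats and its members are hypothetical, not
part of this file; assumes base/statistics.hh):

    // Hypothetical sketch of the new-style stats pattern used below.
    struct ExampleStats : public Stats::Group
    {
        ExampleStats(Stats::Group *parent)
            : Stats::Group(parent),
              ADD_STAT(hits, "Number of hits"),
              ADD_STAT(misses, "Number of misses"),
              ADD_STAT(accesses, "Number of accesses", hits + misses)
        {
            hits.flags(Stats::nozero);
        }

        Stats::Scalar hits;
        Stats::Scalar misses;
        Stats::Formula accesses;
    };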
/*
 * Copyright (c) 2020 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/structures/CacheMemory.hh"

#include "base/intmath.hh"
#include "base/logging.hh"
#include "debug/HtmMem.hh"
#include "debug/RubyCache.hh"
#include "debug/RubyCacheTrace.hh"
#include "debug/RubyResourceStalls.hh"
#include "debug/RubyStats.hh"
#include "mem/cache/replacement_policies/weighted_lru_rp.hh"
#include "mem/ruby/protocol/AccessPermission.hh"
#include "mem/ruby/system/RubySystem.hh"

using namespace std;

ostream&
operator<<(ostream& out, const CacheMemory& obj)
{
    obj.print(out);
    out << flush;
    return out;
}

CacheMemory::CacheMemory(const Params &p)
    : SimObject(p),
      dataArray(p.dataArrayBanks, p.dataAccessLatency,
                p.start_index_bit, p.ruby_system),
      tagArray(p.tagArrayBanks, p.tagAccessLatency,
               p.start_index_bit, p.ruby_system),
      cacheMemoryStats(this)
{
    m_cache_size = p.size;
    m_cache_assoc = p.assoc;
    m_replacementPolicy_ptr = p.replacement_policy;
    m_start_index_bit = p.start_index_bit;
    m_is_instruction_only_cache = p.is_icache;
    m_resource_stalls = p.resourceStalls;
    m_block_size = p.block_size;  // may be 0 at this point. Updated in init()
    m_use_occupancy = dynamic_cast<ReplacementPolicy::WeightedLRU*>(
        m_replacementPolicy_ptr) != nullptr;
}

void
CacheMemory::init()
{
    if (m_block_size == 0) {
        m_block_size = RubySystem::getBlockSizeBytes();
    }
    m_cache_num_sets = (m_cache_size / m_cache_assoc) / m_block_size;
    assert(m_cache_num_sets > 1);
    m_cache_num_set_bits = floorLog2(m_cache_num_sets);
    assert(m_cache_num_set_bits > 0);
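    // Illustrative sizing (hypothetical values): a 16 kB cache with
    // 4-way associativity and 64-byte blocks has (16384 / 4) / 64 = 64
    // sets and therefore floorLog2(64) = 6 set index bits.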

    m_cache.resize(m_cache_num_sets,
                   std::vector<AbstractCacheEntry*>(m_cache_assoc, nullptr));
    replacement_data.resize(m_cache_num_sets,
                            std::vector<ReplData>(m_cache_assoc, nullptr));
    // instantiate all the replacement_data here
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            replacement_data[i][j] =
                m_replacementPolicy_ptr->instantiateEntry();
        }
    }
}

CacheMemory::~CacheMemory()
{
    if (m_replacementPolicy_ptr)
        delete m_replacementPolicy_ptr;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            delete m_cache[i][j];
        }
    }
}

// Convert an Address to its location in the cache
int64_t
CacheMemory::addressToCacheSet(Addr address) const
{
    assert(address == makeLineAddress(address));
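    // Illustrative example (hypothetical values): with m_start_index_bit = 6
    // and m_cache_num_set_bits = 10, line address 0x12345680 selects bits
    // [15:6], i.e. set 0x15a.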
    return bitSelect(address, m_start_index_bit,
                     m_start_index_bit + m_cache_num_set_bits - 1);
}

// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // search the set for the tags
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        if (m_cache[cacheSet][it->second]->m_Permission !=
            AccessPermission_NotPresent)
            return it->second;
    return -1; // Not found
}

// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
                                           Addr tag) const
{
    assert(tag == makeLineAddress(tag));
    // search the set for the tags
    auto it = m_tag_index.find(tag);
    if (it != m_tag_index.end())
        return it->second;
    return -1; // Not found
}

// Given a unique cache block identifier (idx): return the valid address
// stored by the cache block. If the block is invalid/notpresent, the
// function returns the 0 address
Addr
CacheMemory::getAddressAtIdx(int idx) const
{
    Addr tmp(0);

    int set = idx / m_cache_assoc;
    assert(set < m_cache_num_sets);

    int way = idx - set * m_cache_assoc;
    assert(way < m_cache_assoc);
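    // Illustrative decoding (hypothetical values): with m_cache_assoc = 4,
    // idx 10 maps to set 10 / 4 = 2 and way 10 - 2 * 4 = 2.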

    AbstractCacheEntry* entry = m_cache[set][way];
    if (entry == NULL ||
        entry->m_Permission == AccessPermission_Invalid ||
        entry->m_Permission == AccessPermission_NotPresent) {
        return tmp;
    }
    return entry->m_Address;
}

bool
CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
                            DataBlock*& data_ptr)
{
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry* entry = lookup(address);
    if (entry != nullptr) {
        // Do we even have a tag match?
        m_replacementPolicy_ptr->touch(entry->replacementData);
        entry->setLastAccess(curTick());
        data_ptr = &(entry->getDataBlk());

        if (entry->m_Permission == AccessPermission_Read_Write) {
            return true;
        }
        if ((entry->m_Permission == AccessPermission_Read_Only) &&
            (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
            return true;
        }
        // The line must not be accessible
    }
    data_ptr = NULL;
    return false;
}

bool
CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
                             DataBlock*& data_ptr)
{
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry* entry = lookup(address);
    if (entry != nullptr) {
        // Do we even have a tag match?
        m_replacementPolicy_ptr->touch(entry->replacementData);
        entry->setLastAccess(curTick());
        data_ptr = &(entry->getDataBlk());

        return entry->m_Permission != AccessPermission_NotPresent;
    }

    data_ptr = NULL;
    return false;
}

// tests to see if an address is present in the cache
bool
CacheMemory::isTagPresent(Addr address) const
{
    const AbstractCacheEntry* const entry = lookup(address);
    if (entry == nullptr) {
        // We didn't find the tag
        DPRINTF(RubyCache, "No tag match for address: %#x\n", address);
        return false;
    }
    DPRINTF(RubyCache, "address: %#x found\n", address);
    return true;
}

// Returns true if there is:
//   a) a tag match on this address, or
//   b) an unused line in the same cache set
bool
CacheMemory::cacheAvail(Addr address) const
{
    assert(address == makeLineAddress(address));

    int64_t cacheSet = addressToCacheSet(address);

    for (int i = 0; i < m_cache_assoc; i++) {
        AbstractCacheEntry* entry = m_cache[cacheSet][i];
        if (entry != NULL) {
            if (entry->m_Address == address ||
                entry->m_Permission == AccessPermission_NotPresent) {
                // Already in the cache or we found an empty entry
                return true;
            }
        } else {
            return true;
        }
    }
    return false;
}

AbstractCacheEntry*
CacheMemory::allocate(Addr address, AbstractCacheEntry *entry)
{
    assert(address == makeLineAddress(address));
    assert(!isTagPresent(address));
    assert(cacheAvail(address));
    DPRINTF(RubyCache, "address: %#x\n", address);

    // Find the first open slot
    int64_t cacheSet = addressToCacheSet(address);
    std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
    for (int i = 0; i < m_cache_assoc; i++) {
        if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
            if (set[i] && (set[i] != entry)) {
                warn_once("This protocol contains a cache entry handling bug: "
                    "Entries in the cache should never be NotPresent! If\n"
                    "this entry (%#x) is not tracked elsewhere, it will leak "
                    "memory here. Fix your protocol to eliminate these!",
                    address);
            }
            set[i] = entry;  // Init entry
            set[i]->m_Address = address;
            set[i]->m_Permission = AccessPermission_Invalid;
            DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
                    address);
            set[i]->m_locked = -1;
            m_tag_index[address] = i;
            set[i]->setPosition(cacheSet, i);
            set[i]->replacementData = replacement_data[cacheSet][i];
            set[i]->setLastAccess(curTick());

            // Call reset function here to set initial value for different
            // replacement policies.
            m_replacementPolicy_ptr->reset(entry->replacementData);

            return entry;
        }
    }
    panic("Allocate didn't find an available entry");
}

void
CacheMemory::deallocate(Addr address)
{
    DPRINTF(RubyCache, "address: %#x\n", address);
    AbstractCacheEntry* entry = lookup(address);
    assert(entry != nullptr);
    m_replacementPolicy_ptr->invalidate(entry->replacementData);
    uint32_t cache_set = entry->getSet();
    uint32_t way = entry->getWay();
    delete entry;
    m_cache[cache_set][way] = NULL;
    m_tag_index.erase(address);
}

// Returns the physical address of the conflicting cache line
Addr
CacheMemory::cacheProbe(Addr address) const
{
    assert(address == makeLineAddress(address));
    assert(!cacheAvail(address));

    int64_t cacheSet = addressToCacheSet(address);
    std::vector<ReplaceableEntry*> candidates;
    for (int i = 0; i < m_cache_assoc; i++) {
        candidates.push_back(static_cast<ReplaceableEntry*>(
            m_cache[cacheSet][i]));
    }
    return m_cache[cacheSet][m_replacementPolicy_ptr->
        getVictim(candidates)->getWay()]->m_Address;
}

// looks an address up in the cache
AbstractCacheEntry*
CacheMemory::lookup(Addr address)
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// looks an address up in the cache
const AbstractCacheEntry*
CacheMemory::lookup(Addr address) const
{
    assert(address == makeLineAddress(address));
    int64_t cacheSet = addressToCacheSet(address);
    int loc = findTagInSet(cacheSet, address);
    if (loc == -1) return NULL;
    return m_cache[cacheSet][loc];
}

// Sets the most recently used bit for a cache block
void
CacheMemory::setMRU(Addr address)
{
    AbstractCacheEntry* entry = lookup(makeLineAddress(address));
    if (entry != nullptr) {
        m_replacementPolicy_ptr->touch(entry->replacementData);
        entry->setLastAccess(curTick());
    }
}

void
CacheMemory::setMRU(AbstractCacheEntry *entry)
{
    assert(entry != nullptr);
    m_replacementPolicy_ptr->touch(entry->replacementData);
    entry->setLastAccess(curTick());
}

void
CacheMemory::setMRU(Addr address, int occupancy)
{
    AbstractCacheEntry* entry = lookup(makeLineAddress(address));
    if (entry != nullptr) {
        // m_use_occupancy indicates that the WeightedLRU replacement policy
        // is in use; it provides a touch() overload that also takes the
        // block's occupancy into account.
        if (m_use_occupancy) {
            static_cast<ReplacementPolicy::WeightedLRU*>(
                m_replacementPolicy_ptr)->touch(
                    entry->replacementData, occupancy);
        } else {
            m_replacementPolicy_ptr->touch(entry->replacementData);
        }
        entry->setLastAccess(curTick());
    }
}

int
CacheMemory::getReplacementWeight(int64_t set, int64_t loc)
{
    assert(set < m_cache_num_sets);
    assert(loc < m_cache_assoc);
    int ret = 0;
    if (m_cache[set][loc] != NULL) {
        ret = m_cache[set][loc]->getNumValidBlocks();
        assert(ret >= 0);
    }

    return ret;
}

void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
    uint64_t warmedUpBlocks = 0;
    M5_VAR_USED uint64_t totalBlocks = (uint64_t)m_cache_num_sets *
                                       (uint64_t)m_cache_assoc;

    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                AccessPermission perm = m_cache[i][j]->m_Permission;
                RubyRequestType request_type = RubyRequestType_NULL;
                if (perm == AccessPermission_Read_Only) {
                    if (m_is_instruction_only_cache) {
                        request_type = RubyRequestType_IFETCH;
                    } else {
                        request_type = RubyRequestType_LD;
                    }
                } else if (perm == AccessPermission_Read_Write) {
                    request_type = RubyRequestType_ST;
                }

                if (request_type != RubyRequestType_NULL) {
                    Tick lastAccessTick = m_cache[i][j]->getLastAccess();
                    tr->addRecord(cntrl, m_cache[i][j]->m_Address,
                                  0, request_type, lastAccessTick,
                                  m_cache[i][j]->getDataBlk());
                    warmedUpBlocks++;
                }
            }
        }
    }

    DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks "
            "recorded %.2f%%\n", name().c_str(), warmedUpBlocks,
            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
}

void
CacheMemory::print(ostream& out) const
{
    out << "Cache dump: " << name() << endl;
    for (int i = 0; i < m_cache_num_sets; i++) {
        for (int j = 0; j < m_cache_assoc; j++) {
            if (m_cache[i][j] != NULL) {
                out << "  Index: " << i
                    << " way: " << j
                    << " entry: " << *m_cache[i][j] << endl;
            } else {
                out << "  Index: " << i
                    << " way: " << j
                    << " entry: NULL" << endl;
            }
        }
    }
}

void
CacheMemory::printData(ostream& out) const
{
    out << "printData() not supported" << endl;
}

void
CacheMemory::setLocked(Addr address, int context)
{
    DPRINTF(RubyCache, "Setting Lock for addr: %#x to %d\n", address, context);
    AbstractCacheEntry* entry = lookup(address);
    assert(entry != nullptr);
    entry->setLocked(context);
}

void
CacheMemory::clearLocked(Addr address)
{
    DPRINTF(RubyCache, "Clear Lock for addr: %#x\n", address);
    AbstractCacheEntry* entry = lookup(address);
    assert(entry != nullptr);
    entry->clearLocked();
}

void
CacheMemory::clearLockedAll(int context)
{
    // iterate through every set and way to get a cache line
    for (auto i = m_cache.begin(); i != m_cache.end(); ++i) {
        std::vector<AbstractCacheEntry*> set = *i;
        for (auto j = set.begin(); j != set.end(); ++j) {
            AbstractCacheEntry *line = *j;
            if (line && line->isLocked(context)) {
                DPRINTF(RubyCache, "Clear Lock for addr: %#x\n",
                        line->m_Address);
                line->clearLocked();
            }
        }
    }
}

bool
CacheMemory::isLocked(Addr address, int context)
{
    AbstractCacheEntry* entry = lookup(address);
    assert(entry != nullptr);
    DPRINTF(RubyCache, "Testing Lock for addr: %#llx cur %d con %d\n",
            address, entry->m_locked, context);
    return entry->isLocked(context);
}

CacheMemory::
CacheMemoryStats::CacheMemoryStats(Stats::Group *parent)
    : Stats::Group(parent),
      ADD_STAT(numDataArrayReads, "Number of data array reads"),
      ADD_STAT(numDataArrayWrites, "Number of data array writes"),
      ADD_STAT(numTagArrayReads, "Number of tag array reads"),
      ADD_STAT(numTagArrayWrites, "Number of tag array writes"),
      ADD_STAT(numTagArrayStalls, "Number of stalls caused by tag array"),
      ADD_STAT(numDataArrayStalls, "Number of stalls caused by data array"),
      ADD_STAT(htmTransCommitReadSet, "Read set size of a committed "
                                      "transaction"),
      ADD_STAT(htmTransCommitWriteSet, "Write set size of a committed "
                                       "transaction"),
      ADD_STAT(htmTransAbortReadSet, "Read set size of an aborted "
                                     "transaction"),
      ADD_STAT(htmTransAbortWriteSet, "Write set size of an aborted "
                                      "transaction"),
      ADD_STAT(m_demand_hits, "Number of cache demand hits"),
      ADD_STAT(m_demand_misses, "Number of cache demand misses"),
      ADD_STAT(m_demand_accesses, "Number of cache demand accesses",
               m_demand_hits + m_demand_misses),
      ADD_STAT(m_sw_prefetches, "Number of software prefetches"),
      ADD_STAT(m_hw_prefetches, "Number of hardware prefetches"),
      ADD_STAT(m_prefetches, "Number of prefetches",
               m_sw_prefetches + m_hw_prefetches),
      ADD_STAT(m_accessModeType, "")
{
    numDataArrayReads
        .flags(Stats::nozero);

    numDataArrayWrites
        .flags(Stats::nozero);

    numTagArrayReads
        .flags(Stats::nozero);

    numTagArrayWrites
        .flags(Stats::nozero);

    numTagArrayStalls
        .flags(Stats::nozero);

    numDataArrayStalls
        .flags(Stats::nozero);

    htmTransCommitReadSet
        .init(8)
        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan);

    htmTransCommitWriteSet
        .init(8)
        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan);

    htmTransAbortReadSet
        .init(8)
        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan);

    htmTransAbortWriteSet
        .init(8)
        .flags(Stats::pdf | Stats::dist | Stats::nozero | Stats::nonan);

    m_sw_prefetches
        .flags(Stats::nozero);

    m_hw_prefetches
        .flags(Stats::nozero);

    m_prefetches
        .flags(Stats::nozero);

    m_accessModeType
        .init(RubyAccessMode_NUM)
        .flags(Stats::pdf | Stats::total);

    for (int i = 0; i < RubyAccessMode_NUM; i++) {
        m_accessModeType
            .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
            .flags(Stats::nozero);
    }
}

// assumption: SLICC generated files will only call this function
// once **all** resources are granted
void
CacheMemory::recordRequestType(CacheRequestType requestType, Addr addr)
{
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            CacheRequestType_to_string(requestType));
    switch (requestType) {
    case CacheRequestType_DataArrayRead:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        cacheMemoryStats.numDataArrayReads++;
        return;
    case CacheRequestType_DataArrayWrite:
        if (m_resource_stalls)
            dataArray.reserve(addressToCacheSet(addr));
        cacheMemoryStats.numDataArrayWrites++;
        return;
    case CacheRequestType_TagArrayRead:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        cacheMemoryStats.numTagArrayReads++;
        return;
    case CacheRequestType_TagArrayWrite:
        if (m_resource_stalls)
            tagArray.reserve(addressToCacheSet(addr));
        cacheMemoryStats.numTagArrayWrites++;
        return;
    default:
        warn("CacheMemory access_type not found: %s",
             CacheRequestType_to_string(requestType));
    }
}

bool
CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
{
    if (!m_resource_stalls) {
        return true;
    }

    if (res == CacheResourceType_TagArray) {
        if (tagArray.tryAccess(addressToCacheSet(addr))) {
            return true;
        } else {
            DPRINTF(RubyResourceStalls,
                    "Tag array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
            cacheMemoryStats.numTagArrayStalls++;
            return false;
        }
    } else if (res == CacheResourceType_DataArray) {
        if (dataArray.tryAccess(addressToCacheSet(addr))) {
            return true;
        } else {
            DPRINTF(RubyResourceStalls,
                    "Data array stall on addr %#x in set %d\n",
                    addr, addressToCacheSet(addr));
            cacheMemoryStats.numDataArrayStalls++;
            return false;
        }
    } else {
        panic("Unrecognized cache resource type.");
    }
}

bool
CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
}

bool
CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
{
    return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
}

/* hardware transactional memory */

void
CacheMemory::htmAbortTransaction()
{
    uint64_t htmReadSetSize = 0;
    uint64_t htmWriteSetSize = 0;

    // iterate through every set and way to get a cache line
    for (auto i = m_cache.begin(); i != m_cache.end(); ++i)
    {
        std::vector<AbstractCacheEntry*> set = *i;

        for (auto j = set.begin(); j != set.end(); ++j)
        {
            AbstractCacheEntry *line = *j;

            if (line != nullptr) {
                htmReadSetSize += (line->getInHtmReadSet() ? 1 : 0);
                htmWriteSetSize += (line->getInHtmWriteSet() ? 1 : 0);
                if (line->getInHtmWriteSet()) {
                    line->invalidateEntry();
                }
                line->setInHtmWriteSet(false);
                line->setInHtmReadSet(false);
                line->clearLocked();
            }
        }
    }

    cacheMemoryStats.htmTransAbortReadSet.sample(htmReadSetSize);
    cacheMemoryStats.htmTransAbortWriteSet.sample(htmWriteSetSize);
    DPRINTF(HtmMem, "htmAbortTransaction: read set=%u write set=%u\n",
            htmReadSetSize, htmWriteSetSize);
}

void
CacheMemory::htmCommitTransaction()
{
    uint64_t htmReadSetSize = 0;
    uint64_t htmWriteSetSize = 0;

    // iterate through every set and way to get a cache line
    for (auto i = m_cache.begin(); i != m_cache.end(); ++i)
    {
        std::vector<AbstractCacheEntry*> set = *i;

        for (auto j = set.begin(); j != set.end(); ++j)
        {
            AbstractCacheEntry *line = *j;
            if (line != nullptr) {
                htmReadSetSize += (line->getInHtmReadSet() ? 1 : 0);
                htmWriteSetSize += (line->getInHtmWriteSet() ? 1 : 0);
                line->setInHtmWriteSet(false);
                line->setInHtmReadSet(false);
                line->clearLocked();
            }
        }
    }

    cacheMemoryStats.htmTransCommitReadSet.sample(htmReadSetSize);
    cacheMemoryStats.htmTransCommitWriteSet.sample(htmWriteSetSize);
    DPRINTF(HtmMem, "htmCommitTransaction: read set=%u write set=%u\n",
            htmReadSetSize, htmWriteSetSize);
}