From a2b7f9544c3a9c9fbd251bb527e6e4e97ab68760 Mon Sep 17 00:00:00 2001
From: "Daniel R. Carvalho"
Date: Fri, 7 Jun 2019 15:36:11 +0200
Subject: [PATCH] mem-cache: Use RP for data expansion victimization

When searching for victims of a data expansion, a simple approach to
make room for the expanded block is to evict every co-allocated
block. This, however, ignores replacement policies and tends to be
inefficient. Besides, some cache compaction policies do not allow
blocks that changed their compression ratio to be allocated in the
same location (e.g., Skewed Compressed Caches), so they must be moved
elsewhere.

The replacement policy approach asks the replacement policy which
block(s) would be best to evict in order to make room for the
expanded block. The other approach simply evicts all co-allocated
entries.

If the replacement policy selects the superblock of the block being
expanded, we must make sure the latter is not evicted or moved by
mistake.

This patch also allows the user to select which approach they would
like to use.

Change-Id: Iae57cf26dac7218c51ff0169a5cfcf3d6f8ea28a
Signed-off-by: Daniel R. Carvalho
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/36577
Reviewed-by: Nikos Nikoleris
Maintainer: Nikos Nikoleris
Tested-by: kokoro
---
 src/mem/cache/Cache.py           |  2 ++
 src/mem/cache/base.cc            | 60 ++++++++++++++++++++++++--------
 src/mem/cache/base.hh            | 10 +++++-
 src/mem/cache/tags/sector_blk.cc |  2 +-
 src/mem/cache/tags/sector_blk.hh |  2 +-
 5 files changed, 59 insertions(+), 17 deletions(-)

diff --git a/src/mem/cache/Cache.py b/src/mem/cache/Cache.py
index 9b20b656a..149f48542 100644
--- a/src/mem/cache/Cache.py
+++ b/src/mem/cache/Cache.py
@@ -103,6 +103,8 @@ class BaseCache(ClockedObject):
         "Replacement policy")
 
     compressor = Param.BaseCacheCompressor(NULL, "Cache compressor.")
+    replace_expansions = Param.Bool(True, "Apply replacement policy to " \
+        "decide which blocks should be evicted on a data expansion")
 
     sequential_access = Param.Bool(False,
         "Whether to access tags and data sequentially")
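For readers wiring this up, a minimal configuration sketch follows. It is
illustrative only: the CompressedL2 class name is made up, and only the
replace_expansions and compressor parameter names come from the patch and
gem5; the option has no effect unless a compressor is attached.

from m5.objects import Cache, BDI

class CompressedL2(Cache):
    size = '256kB'
    assoc = 8
    tag_latency = 10
    data_latency = 10
    response_latency = 10
    mshrs = 20
    tgts_per_mshr = 12
    # Compression must be enabled for data expansions to ever happen.
    compressor = BDI()
    # New in this patch: let the replacement policy pick expansion
    # victims (True, the default) instead of evicting every
    # co-allocated block (False).
    replace_expansions = True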
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index b74e8cc5c..b128b5c33 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -99,6 +99,7 @@ BaseCache::BaseCache(const BaseCacheParams &p, unsigned blk_size)
       forwardSnoops(true),
       clusivity(p.clusivity),
       isReadOnly(p.is_read_only),
+      replaceExpansions(p.replace_expansions),
       blocked(0), order(0),
       noTargetMSHR(nullptr),
@@ -831,7 +832,7 @@ BaseCache::handleEvictions(std::vector<CacheBlk*> &evict_blks,
 }
 
 bool
-BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
+BaseCache::updateCompressionData(CacheBlk *&blk, const uint64_t* data,
                                  PacketList &writebacks)
 {
     // tempBlock does not exist in the tags, so don't do anything for it.
@@ -839,11 +840,6 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
         return true;
     }
 
-    // Get superblock of the given block
-    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
-    const SuperBlk* superblock = static_cast<const SuperBlk*>(
-        compression_blk->getSectorBlock());
-
     // The compressor is called to compress the updated data, so that its
     // metadata can be updated.
     Cycles compression_lat = Cycles(0);
@@ -857,24 +853,53 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
     // the bigger block
 
     // Get previous compressed size
+    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
     M5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
 
     // Check if new data is co-allocatable
+    const SuperBlk* superblock =
+        static_cast<const SuperBlk*>(compression_blk->getSectorBlock());
     const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
         superblock->canCoAllocate(compression_size);
 
     // If block was compressed, possibly co-allocated with other blocks, and
     // cannot be co-allocated anymore, one or more blocks must be evicted to
-    // make room for the expanded block. As of now we decide to evict the co-
-    // allocated blocks to make room for the expansion, but other approaches
-    // that take the replacement data of the superblock into account may
-    // generate better results
+    // make room for the expanded block
     const bool was_compressed = compression_blk->isCompressed();
     if (was_compressed && !is_co_allocatable) {
         std::vector<CacheBlk*> evict_blks;
-        for (const auto& sub_blk : superblock->blks) {
-            if (sub_blk->isValid() && (compression_blk != sub_blk)) {
-                evict_blks.push_back(sub_blk);
+        bool victim_itself = false;
+        CacheBlk *victim = nullptr;
+        if (replaceExpansions) {
+            victim = tags->findVictim(regenerateBlkAddr(blk),
+                blk->isSecure(), compression_size, evict_blks);
+
+            // It is valid to return nullptr if there is no victim
+            if (!victim) {
+                return false;
+            }
+
+            // If the victim block is itself the block won't need to be moved,
+            // and the victim should not be evicted
+            if (blk == victim) {
+                victim_itself = true;
+                auto it = std::find_if(evict_blks.begin(), evict_blks.end(),
+                    [&blk](CacheBlk* evict_blk){ return evict_blk == blk; });
+                evict_blks.erase(it);
+            }
+
+            // Print victim block's information
+            DPRINTF(CacheRepl, "Data expansion replacement victim: %s\n",
+                victim->print());
+        } else {
+            // If we do not move the expanded block, we must make room for
+            // the expansion to happen, so evict every co-allocated block
+            superblock = static_cast<const SuperBlk*>(
+                compression_blk->getSectorBlock());
+            for (auto& sub_blk : superblock->blks) {
+                if (sub_blk->isValid() && (blk != sub_blk)) {
+                    evict_blks.push_back(sub_blk);
+                }
             }
         }
 
@@ -885,9 +910,16 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
 
         // Update the number of data expansions
         stats.dataExpansions++;
-
         DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
                 "\n", blk->print(), prev_size, compression_size);
+
+        if (!victim_itself && replaceExpansions) {
+            // Move the block's contents to the invalid block so that it now
+            // co-allocates with the other existing superblock entry
+            tags->moveBlock(blk, victim);
+            blk = victim;
+            compression_blk = static_cast<CompressionBlk*>(blk);
+        }
     }
 
     // We always store compressed blocks when possible
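To make the control flow above easier to follow outside of gem5, here is a
self-contained C++ sketch of the two victimization strategies. It is not
gem5 code: Block, findVictimLRU, and the flat std::vector standing in for a
superblock's co-allocated entries are all simplifications of what gem5 does
through tags->findVictim() and the configured replacement policy.

#include <cstdint>
#include <iostream>
#include <vector>

struct Block {
    int id = -1;                 // -1 means the slot is invalid/free
    uint64_t lastTouchTick = 0;  // recency info used by the toy LRU policy
    bool valid() const { return id != -1; }
};

// Toy LRU: prefer a free slot, otherwise the least recently touched block.
Block* findVictimLRU(std::vector<Block>& set)
{
    Block* victim = &set[0];
    for (auto& blk : set) {
        if (!blk.valid()) return &blk;  // free slot: nothing must be evicted
        if (blk.lastTouchTick < victim->lastTouchTick) victim = &blk;
    }
    return victim;
}

// Strategy 1 (replace_expansions=True): ask the policy for a victim and
// evict only what the policy says; the expanding block itself is never
// put on the eviction list, mirroring the "victim_itself" check above.
// Strategy 2 (replace_expansions=False): evict every other valid block
// sharing the expanding block's location.
std::vector<Block*> expansionVictims(std::vector<Block>& set, Block* expanding,
                                     bool replaceExpansions)
{
    std::vector<Block*> evict;
    if (replaceExpansions) {
        Block* victim = findVictimLRU(set);
        if (victim != expanding && victim->valid())
            evict.push_back(victim);
    } else {
        for (auto& blk : set)
            if (blk.valid() && &blk != expanding)
                evict.push_back(&blk);
    }
    return evict;
}

int main()
{
    std::vector<Block> set{{1, 10}, {2, 5}, {3, 7}, {4, 2}};
    for (bool rp : {true, false}) {
        auto victims = expansionVictims(set, &set[0], rp);
        std::cout << (rp ? "RP" : "evict-all") << " victims:";
        for (auto* v : victims) std::cout << ' ' << v->id;
        std::cout << '\n';
    }
}

The sketch only selects victims; the patch additionally relocates the
expanded block into the victim's slot (tags->moveBlock) when the chosen
victim is not the block itself.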
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 99b03abf5..62c2953f2 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -677,7 +677,7 @@ class BaseCache : public ClockedObject
      * @param writebacks List for any writebacks that need to be performed.
      * @return Whether operation is successful or not.
      */
-    bool updateCompressionData(CacheBlk *blk, const uint64_t* data,
+    bool updateCompressionData(CacheBlk *&blk, const uint64_t* data,
         PacketList &writebacks);
 
     /**
@@ -893,6 +893,14 @@ class BaseCache : public ClockedObject
      */
     const bool isReadOnly;
 
+    /**
+     * When a data expansion of a compressed block happens, it may no
+     * longer be able to co-allocate in its current location. If true,
+     * the replacement policy is called to choose a new location for the
+     * block. Otherwise, all co-allocated blocks are evicted.
+     */
+    const bool replaceExpansions;
+
     /**
      * Bit vector of the blocking reasons for the access path.
      * @sa #BlockedCause
diff --git a/src/mem/cache/tags/sector_blk.cc b/src/mem/cache/tags/sector_blk.cc
index 57bfb4d16..f738e0222 100644
--- a/src/mem/cache/tags/sector_blk.cc
+++ b/src/mem/cache/tags/sector_blk.cc
@@ -45,7 +45,7 @@ SectorSubBlk::setSectorBlock(SectorBlk* sector_blk)
     _sectorBlk = sector_blk;
 }
 
-const SectorBlk*
+SectorBlk*
 SectorSubBlk::getSectorBlock() const
 {
     return _sectorBlk;
diff --git a/src/mem/cache/tags/sector_blk.hh b/src/mem/cache/tags/sector_blk.hh
index 2d649cd42..049f49b58 100644
--- a/src/mem/cache/tags/sector_blk.hh
+++ b/src/mem/cache/tags/sector_blk.hh
@@ -86,7 +86,7 @@ class SectorSubBlk : public CacheBlk
      *
      * @return The sector block pointer.
      */
-    const SectorBlk* getSectorBlock() const;
+    SectorBlk* getSectorBlock() const;
 
     /**
      * Set offset of this sub-block within the sector.
-- 
2.30.2
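One subtlety worth spelling out: the signature change from CacheBlk *blk to
CacheBlk *&blk is load-bearing, because the block may be moved into the
victim's slot (blk = victim;) and the caller must observe the new pointer.
A minimal sketch of that idiom, assuming nothing from gem5 (Blk and
relocate are made up for illustration):

#include <cassert>

struct Blk { int data = 0; };

// Passing the pointer by reference lets the callee redirect the caller's
// handle when the block is relocated, mirroring "blk = victim;" above.
void relocate(Blk*& blk, Blk* victim)
{
    victim->data = blk->data; // move contents (stands in for moveBlock())
    blk->data = 0;            // old slot is now free
    blk = victim;             // caller's pointer now names the new slot
}

int main()
{
    Blk a{42}, b;
    Blk* handle = &a;
    relocate(handle, &b);
    assert(handle == &b && handle->data == 42);
}

The same reasoning explains the sector_blk change: getSectorBlock() must
return a non-const SectorBlk* so the superblock's contents can be updated
when blocks are moved.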