mem-cache: Use RP for data expansion victimization
author: Daniel R. Carvalho <odanrc@yahoo.com.br>
Fri, 7 Jun 2019 13:36:11 +0000 (15:36 +0200)
committer: Daniel Carvalho <odanrc@yahoo.com.br>
Thu, 12 Nov 2020 21:46:43 +0000 (21:46 +0000)
When searching for victims of a data expansion a simple approach to
make room for the expanded block is to evict every co-allocatable
block. This, however, ignores replacement policies and tends to be
inefficient. Besides, some cache compaction policies do not allow
blocks that changed their compression ratio to be allocated in the
same location (e.g., Skewed Compressed Caches), so they must be
moved elsewhere.

The replacement policy approach asks the replacement policy which
block(s) would be best to evict in order to make room for the
expanded block. The alternative approach simply evicts all
co-allocated entries. In case the replacement policy selects the
superblock of the block being expanded, we must make sure the latter
is not evicted/moved by mistake.

This patch also allows the user to select which approach they would
like to use.

Change-Id: Iae57cf26dac7218c51ff0169a5cfcf3d6f8ea28a
Signed-off-by: Daniel R. Carvalho <odanrc@yahoo.com.br>
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/36577
Reviewed-by: Nikos Nikoleris <nikos.nikoleris@arm.com>
Maintainer: Nikos Nikoleris <nikos.nikoleris@arm.com>
Tested-by: kokoro <noreply+kokoro@google.com>
src/mem/cache/Cache.py
src/mem/cache/base.cc
src/mem/cache/base.hh
src/mem/cache/tags/sector_blk.cc
src/mem/cache/tags/sector_blk.hh

index 9b20b656a975ced75cde7bd655f019a7d5275d7f..149f48542f62d867e8f19510409f535706ef153a 100644 (file)
@@ -103,6 +103,8 @@ class BaseCache(ClockedObject):
         "Replacement policy")
 
     compressor = Param.BaseCacheCompressor(NULL, "Cache compressor.")
+    replace_expansions = Param.Bool(True, "Apply replacement policy to " \
+        "decide which blocks should be evicted on a data expansion")
 
     sequential_access = Param.Bool(False,
         "Whether to access tags and data sequentially")
index b74e8cc5c3bd55b5507b4337b9cbb0dc858b6ae4..b128b5c334734556e1d850cd337d15db202bffef 100644 (file)
@@ -99,6 +99,7 @@ BaseCache::BaseCache(const BaseCacheParams &p, unsigned blk_size)
       forwardSnoops(true),
       clusivity(p.clusivity),
       isReadOnly(p.is_read_only),
+      replaceExpansions(p.replace_expansions),
       blocked(0),
       order(0),
       noTargetMSHR(nullptr),
@@ -831,7 +832,7 @@ BaseCache::handleEvictions(std::vector<CacheBlk*> &evict_blks,
 }
 
 bool
-BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
+BaseCache::updateCompressionData(CacheBlk *&blk, const uint64_t* data,
                                  PacketList &writebacks)
 {
     // tempBlock does not exist in the tags, so don't do anything for it.
@@ -839,11 +840,6 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
         return true;
     }
 
-    // Get superblock of the given block
-    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
-    const SuperBlk* superblock = static_cast<const SuperBlk*>(
-        compression_blk->getSectorBlock());
-
     // The compressor is called to compress the updated data, so that its
     // metadata can be updated.
     Cycles compression_lat = Cycles(0);
@@ -857,24 +853,53 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
     // the bigger block
 
     // Get previous compressed size
+    CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
     M5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
 
     // Check if new data is co-allocatable
+    const SuperBlk* superblock =
+        static_cast<const SuperBlk*>(compression_blk->getSectorBlock());
     const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
         superblock->canCoAllocate(compression_size);
 
     // If block was compressed, possibly co-allocated with other blocks, and
     // cannot be co-allocated anymore, one or more blocks must be evicted to
-    // make room for the expanded block. As of now we decide to evict the co-
-    // allocated blocks to make room for the expansion, but other approaches
-    // that take the replacement data of the superblock into account may
-    // generate better results
+    // make room for the expanded block
     const bool was_compressed = compression_blk->isCompressed();
     if (was_compressed && !is_co_allocatable) {
         std::vector<CacheBlk*> evict_blks;
-        for (const auto& sub_blk : superblock->blks) {
-            if (sub_blk->isValid() && (compression_blk != sub_blk)) {
-                evict_blks.push_back(sub_blk);
+        bool victim_itself = false;
+        CacheBlk *victim = nullptr;
+        if (replaceExpansions) {
+            victim = tags->findVictim(regenerateBlkAddr(blk),
+                blk->isSecure(), compression_size, evict_blks);
+
+            // It is valid to return nullptr if there is no victim
+            if (!victim) {
+                return false;
+            }
+
+            // If the victim is the block itself, the block won't need to
+            // be moved, and the victim should not be evicted
+            if (blk == victim) {
+                victim_itself = true;
+                auto it = std::find_if(evict_blks.begin(), evict_blks.end(),
+                    [&blk](CacheBlk* evict_blk){ return evict_blk == blk; });
+                evict_blks.erase(it);
+            }
+
+            // Print victim block's information
+            DPRINTF(CacheRepl, "Data expansion replacement victim: %s\n",
+                victim->print());
+        } else {
+            // If we do not move the expanded block, we must make room for
+            // the expansion to happen, so evict every co-allocated block
+            superblock = static_cast<const SuperBlk*>(
+                compression_blk->getSectorBlock());
+            for (auto& sub_blk : superblock->blks) {
+                if (sub_blk->isValid() && (blk != sub_blk)) {
+                    evict_blks.push_back(sub_blk);
+                }
             }
         }
 
@@ -885,9 +910,16 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
 
         // Update the number of data expansions
         stats.dataExpansions++;
-
         DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
                 "\n", blk->print(), prev_size, compression_size);
+
+        if (!victim_itself && replaceExpansions) {
+            // Move the block's contents to the invalid block so that it now
+            // co-allocates with the other existing superblock entry
+            tags->moveBlock(blk, victim);
+            blk = victim;
+            compression_blk = static_cast<CompressionBlk*>(blk);
+        }
     }
 
     // We always store compressed blocks when possible
index 99b03abf542adf81b04a44050e90ab89bd38a4c7..62c2953f2adc975a42d632e2c25c00d931be09da 100644 (file)
@@ -677,7 +677,7 @@ class BaseCache : public ClockedObject
      * @param writebacks List for any writebacks that need to be performed.
      * @return Whether operation is successful or not.
      */
-    bool updateCompressionData(CacheBlk *blk, const uint64_t* data,
+    bool updateCompressionData(CacheBlk *&blk, const uint64_t* data,
                                PacketList &writebacks);
 
     /**
@@ -893,6 +893,14 @@ class BaseCache : public ClockedObject
      */
     const bool isReadOnly;
 
+    /**
+     * When a data expansion of a compressed block happens, it will no
+     * longer be able to co-allocate where it currently is. If true, the
+     * replacement policy is called to choose a new location for the
+     * block. Otherwise, all co-allocated blocks are evicted.
+     */
+    const bool replaceExpansions;
+
     /**
      * Bit vector of the blocking reasons for the access path.
      * @sa #BlockedCause
index 57bfb4d1622200be6426da18860189d8d9e8e7c7..f738e0222602fcdd121738bcc1dca6fa7619c0c2 100644 (file)
@@ -45,7 +45,7 @@ SectorSubBlk::setSectorBlock(SectorBlk* sector_blk)
     _sectorBlk = sector_blk;
 }
 
-const SectorBlk*
+SectorBlk*
 SectorSubBlk::getSectorBlock() const
 {
     return _sectorBlk;
index 2d649cd427dba70a62a5fc147fae6c046b22a6d3..049f49b58687df747011749cc1cb6c6feca6ce62 100644 (file)
@@ -86,7 +86,7 @@ class SectorSubBlk : public CacheBlk
      *
      * @return The sector block pointer.
      */
-    const SectorBlk* getSectorBlock() const;
+    SectorBlk* getSectorBlock() const;
 
     /**
      * Set offset of this sub-block within the sector.