CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
M5_VAR_USED const std::size_t prev_size = compression_blk->getSizeBits();
- // Check if new data is co-allocatable
- const SuperBlk* superblock =
- static_cast<const SuperBlk*>(compression_blk->getSectorBlock());
- const bool is_co_allocatable = superblock->isCompressed(compression_blk) &&
- superblock->canCoAllocate(compression_size);
-
// If compressed size didn't change enough to modify its co-allocatability
// there is nothing to do. Otherwise we may be facing a data expansion
// (block passing from more compressed to less compressed state), or a
} else {
// If we do not move the expanded block, we must make room for
// the expansion to happen, so evict every co-allocated block
- superblock = static_cast<const SuperBlk*>(
+ const SuperBlk* superblock = static_cast<const SuperBlk*>(
compression_blk->getSectorBlock());
for (auto& sub_blk : superblock->blks) {
if (sub_blk->isValid() && (blk != sub_blk)) {
compression_blk->setSizeBits(compression_size);
compression_blk->setDecompressionLatency(decompression_lat);
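+ // The compressed/uncompressed state has already been updated by
+ // setSizeBits(), based on the new size and the superblock's occupancy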
- if (is_data_expansion || is_data_contraction) {
- // If contracting data, for sure data is compressed. If expanding,
- // both situations can arise. When no contraction or expansion happens
- // block keeps its old state
- if (is_co_allocatable) {
- compression_blk->setCompressed();
- } else {
- compression_blk->setUncompressed();
- }
- }
-
return true;
}
return nullptr;
}
- // If using a compressor, set compression data. This must be done before
- // block insertion, as compressed tags use this information.
+ // Insert new block at victimized entry
+ tags->insertBlock(pkt, victim);
+
+ // If using a compressor, set compression data. This must be done after
+ // insertion, as setting the size also updates the block's compression
+ // bit, which requires the block to already be valid in its superblock.
if (compressor) {
compressor->setSizeBits(victim, blk_size_bits);
compressor->setDecompressionLatency(victim, decompression_lat);
}
- // Insert new block at victimized entry
- tags->insertBlock(pkt, victim);
-
return victim;
}
-void
-CompressedTags::insertBlock(const PacketPtr pkt, CacheBlk *blk)
-{
- // We check if block can co-allocate before inserting, because this check
- // assumes the block is still invalid
- CompressionBlk* compression_blk = static_cast<CompressionBlk*>(blk);
- const SuperBlk* superblock = static_cast<const SuperBlk*>(
- compression_blk->getSectorBlock());
- const bool is_co_allocatable = superblock->isCompressed() &&
- superblock->canCoAllocate(compression_blk->getSizeBits());
-
- // Insert block
- SectorTags::insertBlock(pkt, blk);
-
- // We always store compressed blocks when possible
- if (is_co_allocatable) {
- compression_blk->setCompressed();
- } else {
- compression_blk->setUncompressed();
- }
-}
-
void
CompressedTags::forEachBlk(std::function<void(CacheBlk &)> visitor)
{
CompressionBlk::setSizeBits(const std::size_t size)
{
_size = size;
+
+ SuperBlk* superblock = static_cast<SuperBlk*>(getSectorBlock());
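+ // In CompressedTags the owning sector block is a SuperBlk, which tracks
+ // the co-allocation state shared by the blocks it contains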
+
+ // This function is called either right after an insertion or on an
+ // update of the block's size. If other blocks are valid in the
+ // superblock, inherit the superblock's compressibility; otherwise,
+ // check whether the new size allows co-allocation
+ const uint8_t num_valid = superblock->getNumValid();
+ assert(num_valid >= 1);
+ if (num_valid == 1) {
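+ // This block is the only valid one in the superblock (e.g., it has
+ // just been inserted), so whether it is stored compressed depends
+ // solely on whether the new size is small enough to co-allocate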
+ if (superblock->canCoAllocate(size)) {
+ setCompressed();
+ } else {
+ setUncompressed();
+ }
+ } else {
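+ // Other blocks are co-allocated in this superblock; isCompressed(this)
+ // checks those blocks while ignoring this one, so the block simply
+ // inherits the superblock's compressibility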
+ if (superblock->isCompressed(this)) {
+ setCompressed();
+ } else {
+ setUncompressed();
+ }
+ }
}
Cycles