/*
- * Copyright (c) 2018 Inria
+ * Copyright (c) 2018, 2020 Inria
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Daniel Carvalho
*/
/**
* @file
- * Definitions of a base set associative sector tag store.
+ * Definitions of a sector tag store.
*/
#include "mem/cache/tags/sector_tags.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/types.hh"
-#include "debug/CacheRepl.hh"
#include "mem/cache/base.hh"
#include "mem/cache/replacement_policies/base.hh"
-
-SectorTags::SectorTags(const SectorTagsParams *p)
- : BaseTags(p), assoc(p->assoc), allocAssoc(p->assoc),
- sequentialAccess(p->sequential_access),
- replacementPolicy(p->replacement_policy),
- numBlocksPerSector(p->num_blocks_per_sector),
- numSectors(numBlocks / p->num_blocks_per_sector),
- numSets(numSectors / p->assoc),
- blks(numBlocks), secBlks(numSectors), sets(numSets),
- sectorShift(floorLog2(blkSize)),
- setShift(sectorShift + floorLog2(numBlocksPerSector)),
- tagShift(setShift + floorLog2(numSets)),
- sectorMask(numBlocksPerSector - 1), setMask(numSets - 1)
+#include "mem/cache/replacement_policies/replaceable_entry.hh"
+#include "mem/cache/tags/indexing_policies/base.hh"
+
+SectorTags::SectorTags(const SectorTagsParams &p)
+ : BaseTags(p), allocAssoc(p.assoc),
+ sequentialAccess(p.sequential_access),
+ replacementPolicy(p.replacement_policy),
+ numBlocksPerSector(p.num_blocks_per_sector),
+ numSectors(numBlocks / numBlocksPerSector),
+ sectorShift(floorLog2(blkSize)), sectorMask(numBlocksPerSector - 1),
+ sectorStats(stats, *this)
{
+    // There must be an indexing policy
+ fatal_if(!p.indexing_policy, "An indexing policy is required");
+
// Check parameters
fatal_if(blkSize < 4 || !isPowerOf2(blkSize),
"Block size must be at least 4 and a power of 2");
- fatal_if(!isPowerOf2(numSets),
- "# of sets must be non-zero and a power of 2");
fatal_if(!isPowerOf2(numBlocksPerSector),
"# of blocks per sector must be non-zero and a power of 2");
- fatal_if(assoc <= 0, "associativity must be greater than zero");
+}
- // Initialize all sets
- unsigned sec_blk_index = 0; // index into sector blks array
- unsigned blk_index = 0; // index into blks array
- for (unsigned i = 0; i < numSets; ++i) {
- sets[i].resize(assoc);
+void
+SectorTags::tagsInit()
+{
+ // Create blocks and sector blocks
+ blks = std::vector<SectorSubBlk>(numBlocks);
+ secBlks = std::vector<SectorBlk>(numSectors);
- // Initialize all sectors in this set
- for (unsigned j = 0; j < assoc; ++j) {
+ // Initialize all blocks
+ unsigned blk_index = 0; // index into blks array
+ for (unsigned sec_blk_index = 0; sec_blk_index < numSectors;
+ sec_blk_index++)
+ {
+ // Locate next cache sector
+ SectorBlk* sec_blk = &secBlks[sec_blk_index];
+
+ // Associate a replacement data entry to the sector
+ sec_blk->replacementData = replacementPolicy->instantiateEntry();
+
+ // Initialize all blocks in this sector
+ sec_blk->blks.resize(numBlocksPerSector);
+ for (unsigned k = 0; k < numBlocksPerSector; ++k){
// Select block within the set to be linked
- SectorBlk*& sec_blk = sets[i][j];
-
- // Locate next cache sector
- sec_blk = &secBlks[sec_blk_index];
+ SectorSubBlk*& blk = sec_blk->blks[k];
- // Associate a replacement data entry to the sector
- sec_blk->replacementData = replacementPolicy->instantiateEntry();
+ // Locate next cache block
+ blk = &blks[blk_index];
- // Initialize all blocks in this sector
- sec_blk->blks.resize(numBlocksPerSector);
- for (unsigned k = 0; k < numBlocksPerSector; ++k){
- // Select block within the set to be linked
- SectorSubBlk*& blk = sec_blk->blks[k];
+ // Associate a data chunk to the block
+ blk->data = &dataBlks[blkSize*blk_index];
- // Locate next cache block
- blk = &blks[blk_index];
+ // Associate sector block to this block
+ blk->setSectorBlock(sec_blk);
- // Associate a data chunk to the block
- blk->data = &dataBlks[blkSize*blk_index];
+ // Associate the sector replacement data to this block
+ blk->replacementData = sec_blk->replacementData;
- // Associate sector block to this block
- blk->setSectorBlock(sec_blk);
+            // Set its sector offset within the sector block
+ blk->setSectorOffset(k);
- // Associate the sector replacement data to this block
- blk->replacementData = sec_blk->replacementData;
-
- // Set its set, way and sector offset
- blk->set = i;
- blk->way = j;
- blk->setSectorOffset(k);
-
- // Update block index
- ++blk_index;
- }
-
- // Update sector block index
- ++sec_blk_index;
+ // Update block index
+ ++blk_index;
}
+
+ // Link block to indexing policy
+ indexingPolicy->setEntry(sec_blk, sec_blk_index);
}
}
// in the sector.
if (!sector_blk->isValid()) {
// Decrease the number of tags in use
- tagsInUse--;
+ stats.tagsInUse--;
+ assert(stats.tagsInUse.value() >= 0);
// Invalidate replacement data, as we're invalidating the sector
replacementPolicy->invalidate(sector_blk->replacementData);
// Access all tags in parallel, hence one in each way. The data side
// either accesses all blocks in parallel, or one block sequentially on
// a hit. Sequential access with a miss doesn't access data.
- tagAccesses += allocAssoc;
+ stats.tagAccesses += allocAssoc;
if (sequentialAccess) {
if (blk != nullptr) {
- dataAccesses += 1;
+ stats.dataAccesses += 1;
}
} else {
- dataAccesses += allocAssoc*numBlocksPerSector;
+ stats.dataAccesses += allocAssoc*numBlocksPerSector;
}
+ // If a cache hit
if (blk != nullptr) {
- // If a cache hit
- lat = accessLatency;
- // Check if the block to be accessed is available. If not,
- // apply the accessLatency on top of block->whenReady.
- if (blk->whenReady > curTick() &&
- cache->ticksToCycles(blk->whenReady - curTick()) >
- accessLatency) {
- lat = cache->ticksToCycles(blk->whenReady - curTick()) +
- accessLatency;
- }
-
// Update number of references to accessed block
- blk->refCount++;
+ blk->increaseRefCount();
// Get block's sector
SectorSubBlk* sub_blk = static_cast<SectorSubBlk*>(blk);
// Update replacement data of accessed block, which is shared with
// the whole sector it belongs to
replacementPolicy->touch(sector_blk->replacementData);
- } else {
- // If a cache miss
- lat = lookupLatency;
}
- return blk;
-}
+ // The tag lookup latency is the same for a hit or a miss
+ lat = lookupLatency;
-const std::vector<SectorBlk*>
-SectorTags::getPossibleLocations(Addr addr) const
-{
- return sets[extractSet(addr)];
+ return blk;
}
void
SectorTags::insertBlock(const PacketPtr pkt, CacheBlk *blk)
{
- // Insert block
- BaseTags::insertBlock(pkt, blk);
-
// Get block's sector
SectorSubBlk* sub_blk = static_cast<SectorSubBlk*>(blk);
const SectorBlk* sector_blk = sub_blk->getSectorBlock();
// When a block is inserted, the tag is only a newly used tag if the
// sector was not previously present in the cache.
- // This assumes BaseTags::insertBlock does not set the valid bit.
if (sector_blk->isValid()) {
// An existing entry's replacement data is just updated
replacementPolicy->touch(sector_blk->replacementData);
} else {
// Increment tag counter
- tagsInUse++;
+ stats.tagsInUse++;
+ assert(stats.tagsInUse.value() <= numSectors);
// A new entry resets the replacement data
replacementPolicy->reset(sector_blk->replacementData);
}
+
+ // Do common block insertion functionality
+ BaseTags::insertBlock(pkt, blk);
+}
+
+void
+SectorTags::moveBlock(CacheBlk *src_blk, CacheBlk *dest_blk)
+{
+ const bool dest_was_valid =
+ static_cast<SectorSubBlk*>(dest_blk)->getSectorBlock()->isValid();
+
+ BaseTags::moveBlock(src_blk, dest_blk);
+
+ // Get blocks' sectors. The blocks have effectively been swapped by now,
+ // so src points to an invalid block, and dest to the moved valid one.
+ SectorSubBlk* src_sub_blk = static_cast<SectorSubBlk*>(src_blk);
+ const SectorBlk* src_sector_blk = src_sub_blk->getSectorBlock();
+ SectorSubBlk* dest_sub_blk = static_cast<SectorSubBlk*>(dest_blk);
+ const SectorBlk* dest_sector_blk = dest_sub_blk->getSectorBlock();
+
+ // Since the blocks were using different replacement data pointers,
+ // we must touch the replacement data of the new entry, and invalidate
+ // the one that is being moved.
+ // When a block in a sector is invalidated, it does not make the tag
+ // invalid automatically, as there might be other blocks in the sector
+ // using it. The tag is invalidated only when there is a single block
+ // in the sector.
+ if (!src_sector_blk->isValid()) {
+ // Invalidate replacement data, as we're invalidating the sector
+ replacementPolicy->invalidate(src_sector_blk->replacementData);
+
+ if (dest_was_valid) {
+ // If destination sector was valid, and the source sector became
+ // invalid, there is one less tag being used
+ stats.tagsInUse--;
+ assert(stats.tagsInUse.value() >= 0);
+ }
+ } else if (!dest_was_valid) {
+ // If destination sector was invalid and became valid, and the source
+ // sector is still valid, there is one extra tag being used
+ stats.tagsInUse++;
+ assert(stats.tagsInUse.value() <= numSectors);
+ }
+
+ if (dest_was_valid) {
+ replacementPolicy->touch(dest_sector_blk->replacementData);
+ } else {
+ replacementPolicy->reset(dest_sector_blk->replacementData);
+ }
}
CacheBlk*
// due to sectors being composed of contiguous-address entries
const Addr offset = extractSectorOffset(addr);
- // Find all possible sector locations for the given address
- const std::vector<SectorBlk*> locations = getPossibleLocations(addr);
+ // Find all possible sector entries that may contain the given address
+ const std::vector<ReplaceableEntry*> entries =
+ indexingPolicy->getPossibleEntries(addr);
// Search for block
- for (const auto& sector : locations) {
- auto blk = sector->blks[offset];
- if (blk->getTag() == tag && blk->isValid() &&
- blk->isSecure() == is_secure) {
+ for (const auto& sector : entries) {
+ auto blk = static_cast<SectorBlk*>(sector)->blks[offset];
+ if (blk->matchTag(tag, is_secure)) {
return blk;
}
}
return nullptr;
}
-ReplaceableEntry*
-SectorTags::findBlockBySetAndWay(int set, int way) const
-{
- return sets[set][way];
-}
-
CacheBlk*
-SectorTags::findVictim(Addr addr, const bool is_secure,
- std::vector<CacheBlk*>& evict_blks) const
+SectorTags::findVictim(Addr addr, const bool is_secure, const std::size_t size,
+ std::vector<CacheBlk*>& evict_blks)
{
- // Get all possible locations of this sector
- const std::vector<SectorBlk*> sector_locations =
- getPossibleLocations(addr);
+ // Get possible entries to be victimized
+ const std::vector<ReplaceableEntry*> sector_entries =
+ indexingPolicy->getPossibleEntries(addr);
// Check if the sector this address belongs to has been allocated
Addr tag = extractTag(addr);
SectorBlk* victim_sector = nullptr;
- for (const auto& sector : sector_locations){
- if ((tag == sector->getTag()) && sector->isValid() &&
- (is_secure == sector->isSecure())){
- victim_sector = sector;
+ for (const auto& sector : sector_entries) {
+ SectorBlk* sector_blk = static_cast<SectorBlk*>(sector);
+ if (sector_blk->matchTag(tag, is_secure)) {
+ victim_sector = sector_blk;
break;
}
}
if (victim_sector == nullptr){
// Choose replacement victim from replacement candidates
victim_sector = static_cast<SectorBlk*>(replacementPolicy->getVictim(
- std::vector<ReplaceableEntry*>(
- sector_locations.begin(), sector_locations.end())));
+ sector_entries));
}
- // Get the location of the victim block within the sector
- CacheBlk* victim = victim_sector->blks[extractSectorOffset(addr)];
+ // Get the entry of the victim block within the sector
+ SectorSubBlk* victim = victim_sector->blks[extractSectorOffset(addr)];
// Get evicted blocks. Blocks are only evicted if the sectors mismatch and
// the currently existing sector is valid.
- if ((tag == victim_sector->getTag()) &&
- (is_secure == victim_sector->isSecure())){
+ if (victim_sector->matchTag(tag, is_secure)) {
// It would be a hit if victim was valid, and upgrades do not call
// findVictim, so it cannot happen
assert(!victim->isValid());
} else {
// The whole sector must be evicted to make room for the new sector
for (const auto& blk : victim_sector->blks){
- evict_blks.push_back(blk);
+ if (blk->isValid()) {
+ evict_blks.push_back(blk);
+ }
}
}
- SectorSubBlk* victim_cast = static_cast<SectorSubBlk*>(victim);
- DPRINTF(CacheRepl, "set %x, way %x, sector offset %x: %s\n",
- "selecting blk for replacement\n", victim->set, victim->way,
- victim_cast->getSectorOffset());
+ // Update number of sub-blocks evicted due to a replacement
+ sectorStats.evictionsReplacement[evict_blks.size()]++;
return victim;
}
-Addr
-SectorTags::extractTag(Addr addr) const
-{
- return addr >> tagShift;
-}
-
-int
-SectorTags::extractSet(Addr addr) const
-{
- return (addr >> setShift) & setMask;
-}
-
int
SectorTags::extractSectorOffset(Addr addr) const
{
SectorTags::regenerateBlkAddr(const CacheBlk* blk) const
{
const SectorSubBlk* blk_cast = static_cast<const SectorSubBlk*>(blk);
- return ((blk_cast->getTag() << tagShift) | ((Addr)blk->set << setShift) |
- ((Addr)blk_cast->getSectorOffset() << sectorShift));
+ const SectorBlk* sec_blk = blk_cast->getSectorBlock();
+ const Addr sec_addr =
+ indexingPolicy->regenerateAddr(blk->getTag(), sec_blk);
+ return sec_addr | ((Addr)blk_cast->getSectorOffset() << sectorShift);
+}
+
+SectorTags::SectorTagsStats::SectorTagsStats(BaseTagStats &base_group,
+ SectorTags& _tags)
+ : Stats::Group(&base_group), tags(_tags),
+ ADD_STAT(evictionsReplacement, UNIT_COUNT,
+ "Number of blocks evicted due to a replacement")
+{
+}
+
+void
+SectorTags::SectorTagsStats::regStats()
+{
+ Stats::Group::regStats();
+
+ evictionsReplacement.init(tags.numBlocksPerSector + 1);
+ for (unsigned i = 0; i <= tags.numBlocksPerSector; ++i) {
+ evictionsReplacement.subname(i, std::to_string(i));
+ evictionsReplacement.subdesc(i, "Number of replacements that caused " \
+ "the eviction of " + std::to_string(i) + " blocks");
+ }
}
void
}
return false;
}
-
-SectorTags *
-SectorTagsParams::create()
-{
- return new SectorTags(this);
-}