Source('base_set_assoc.cc')
Source('fa_lru.cc')
Source('sector_tags.cc')
-Source('skewed_assoc.cc')
from m5.params import *
from m5.proxy import *
from ClockedObject import ClockedObject
+from IndexingPolicies import *
class BaseTags(ClockedObject):
type = 'BaseTags'
sequential_access = Param.Bool(Parent.sequential_access,
"Whether to access tags and data sequentially")
+ # Get indexing policy
+ indexing_policy = Param.BaseIndexingPolicy(SetAssociative(),
+ "Indexing policy")
+
+ # Set the indexing entry size as the block size
+ entry_size = Param.Int(Parent.cache_line_size,
+ "Indexing entry size in bytes")
+
class BaseSetAssoc(BaseTags):
type = 'BaseSetAssoc'
cxx_header = "mem/cache/tags/base_set_assoc.hh"
# Number of sub-sectors (data blocks) per sector
num_blocks_per_sector = Param.Int(1, "Number of sub-sectors per sector");
+ # The indexing entry is now a sector block
+ entry_size = Parent.cache_line_size * Self.num_blocks_per_sector
+
# Get replacement policy from the parent (cache)
replacement_policy = Param.BaseReplacementPolicy(
Parent.replacement_policy, "Replacement policy")
min_tracked_cache_size = Param.MemorySize("128kB", "Minimum cache size for"
" which we track statistics")
-class SkewedAssoc(BaseSetAssoc):
- type = 'SkewedAssoc'
- cxx_header = "mem/cache/tags/skewed_assoc.hh"
+
+ # This tag uses its own embedded indexing
+ indexing_policy = NULL
#include "base/types.hh"
#include "mem/cache/base.hh"
+#include "mem/cache/tags/indexing_policies/base.hh"
#include "mem/request.hh"
#include "sim/core.hh"
#include "sim/sim_exit.hh"
accessLatency(p->sequential_access ?
p->tag_latency + p->data_latency :
std::max(p->tag_latency, p->data_latency)),
- cache(nullptr),
+ cache(nullptr), indexingPolicy(p->indexing_policy),
warmupBound((p->warmup_percentage/100.0) * (p->size / p->block_size)),
warmedUp(false), numBlocks(p->size / p->block_size),
dataBlks(new uint8_t[p->size]) // Allocate data storage in one big chunk
cache = _cache;
}
-std::vector<ReplaceableEntry*>
-BaseTags::getPossibleLocations(const Addr addr) const
+ReplaceableEntry*
+BaseTags::findBlockBySetAndWay(int set, int way) const
{
- panic("Unimplemented getPossibleLocations for tags subclass");
+ return indexingPolicy->getEntry(set, way);
}
CacheBlk*
// Extract block tag
Addr tag = extractTag(addr);
- // Find possible locations for the given address
- const std::vector<ReplaceableEntry*> locations =
- getPossibleLocations(addr);
+ // Find possible entries that may contain the given address
+ const std::vector<ReplaceableEntry*> entries =
+ indexingPolicy->getPossibleEntries(addr);
// Search for block
- for (const auto& location : locations) {
+ for (const auto& location : entries) {
CacheBlk* blk = static_cast<CacheBlk*>(location);
if ((blk->tag == tag) && blk->isValid() &&
(blk->isSecure() == is_secure)) {
dataAccesses += 1;
}
+Addr
+BaseTags::extractTag(const Addr addr) const
+{
+ return indexingPolicy->extractTag(addr);
+}
+
void
BaseTags::cleanupRefsVisitor(CacheBlk &blk)
{
#include "sim/clocked_object.hh"
class BaseCache;
+class BaseIndexingPolicy;
class ReplaceableEntry;
/**
/** Pointer to the parent cache. */
BaseCache *cache;
+ /** Indexing policy */
+ BaseIndexingPolicy *indexingPolicy;
+
/**
* The number of tags that need to be touched to meet the warmup
* percentage.
*/
void setCache(BaseCache *_cache);
- /**
- * Find all possible block locations for insertion and replacement of
- * an address. Should be called immediately before ReplacementPolicy's
- * findVictim() not to break cache resizing.
- * Returns blocks in all ways belonging to the set of the address.
- *
- * @param addr The addr to a find possible locations for.
- * @return The possible locations.
- */
- virtual std::vector<ReplaceableEntry*> getPossibleLocations(
- const Addr addr) const;
-
public:
typedef BaseTagsParams Params;
BaseTags(const Params *p);
* @param way The way of the block.
* @return The block.
*/
- virtual ReplaceableEntry* findBlockBySetAndWay(int set, int way) const = 0;
+ virtual ReplaceableEntry* findBlockBySetAndWay(int set, int way) const;
/**
* Align an address to the block size.
virtual CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat) = 0;
- virtual Addr extractTag(Addr addr) const = 0;
+ /**
+ * Generate the tag from the given address.
+ *
+ * @param addr The address to get the tag from.
+ * @return The tag of the address.
+ */
+ virtual Addr extractTag(const Addr addr) const;
/**
* Insert the new block into the cache and update stats.
/**
* @file
- * Definitions of a base set associative tag store.
+ * Definitions of a conventional tag store.
*/
#include "mem/cache/tags/base_set_assoc.hh"
#include "base/intmath.hh"
BaseSetAssoc::BaseSetAssoc(const Params *p)
- :BaseTags(p), assoc(p->assoc), allocAssoc(p->assoc),
- blks(p->size / p->block_size),
- numSets(p->size / (p->block_size * p->assoc)),
+ :BaseTags(p), allocAssoc(p->assoc), blks(p->size / p->block_size),
sequentialAccess(p->sequential_access),
- sets(p->size / (p->block_size * p->assoc)),
replacementPolicy(p->replacement_policy)
{
// Check parameters
if (blkSize < 4 || !isPowerOf2(blkSize)) {
fatal("Block size must be at least 4 and a power of 2");
}
- if (!isPowerOf2(numSets)) {
- fatal("# of sets must be non-zero and a power of 2");
- }
- if (assoc <= 0) {
- fatal("associativity must be greater than zero");
- }
-
- setShift = floorLog2(blkSize);
- setMask = numSets - 1;
- tagShift = setShift + floorLog2(numSets);
}
void
// Set parent cache
setCache(cache);
- // Initialize blocks
- unsigned blkIndex = 0; // index into blks array
- for (unsigned i = 0; i < numSets; ++i) {
- sets[i].resize(assoc);
-
- // link in the data blocks
- for (unsigned j = 0; j < assoc; ++j) {
- // Select block within the set to be linked
- BlkType*& blk = sets[i][j];
-
- // Locate next cache block
- blk = &blks[blkIndex];
-
- // Associate a data chunk to the block
- blk->data = &dataBlks[blkSize*blkIndex];
+ // Initialize all blocks
+ for (unsigned blk_index = 0; blk_index < numBlocks; blk_index++) {
+ // Locate next cache block
+ BlkType* blk = &blks[blk_index];
- // Associate a replacement data entry to the block
- blk->replacementData = replacementPolicy->instantiateEntry();
+ // Link block to indexing policy
+ indexingPolicy->setEntry(blk, blk_index);
- // Setting the tag to j is just to prevent long chains in the
- // hash table; won't matter because the block is invalid
- blk->tag = j;
+ // Associate a data chunk to the block
+ blk->data = &dataBlks[blkSize*blk_index];
- // Set its index
- blk->setPosition(i, j);
-
- // Update block index
- ++blkIndex;
- }
+ // Associate a replacement data entry to the block
+ blk->replacementData = replacementPolicy->instantiateEntry();
}
}
replacementPolicy->invalidate(blk->replacementData);
}
-ReplaceableEntry*
-BaseSetAssoc::findBlockBySetAndWay(int set, int way) const
-{
- return sets[set][way];
-}
-
BaseSetAssoc *
BaseSetAssocParams::create()
{
+ # There must be an indexing policy
+ fatal_if(!indexing_policy, "An indexing policy is required");
+
return new BaseSetAssoc(this);
}
#include "mem/cache/blk.hh"
#include "mem/cache/replacement_policies/base.hh"
#include "mem/cache/tags/base.hh"
+#include "mem/cache/tags/indexing_policies/base.hh"
#include "params/BaseSetAssoc.hh"
/**
- * A BaseSetAssoc cache tag store.
+ * A basic cache tag store.
* @sa \ref gem5MemorySystem "gem5 Memory System"
*
* The BaseSetAssoc placement policy divides the cache into s sets of w
- * cache lines (ways). A cache line is mapped onto a set, and can be placed
- * into any of the ways of this set.
+ * cache lines (ways).
*/
class BaseSetAssoc : public BaseTags
{
public:
/** Typedef the block type used in this tag store. */
typedef CacheBlk BlkType;
- /** Typedef the set type used in this tag store. */
- typedef std::vector<CacheBlk*> SetType;
protected:
- /** The associativity of the cache. */
- const unsigned assoc;
/** The allocatable associativity of the cache (alloc mask). */
unsigned allocAssoc;
/** The cache blocks. */
std::vector<BlkType> blks;
- /** The number of sets in the cache. */
- const unsigned numSets;
-
/** Whether tags and data are accessed sequentially. */
const bool sequentialAccess;
- /** The cache sets. */
- std::vector<SetType> sets;
-
- /** The amount to shift the address to get the set. */
- int setShift;
- /** The amount to shift the address to get the tag. */
- int tagShift;
- /** Mask out all bits that aren't part of the set index. */
- unsigned setMask;
-
/** Replacement policy */
BaseReplacementPolicy *replacementPolicy;
- /**
- * Find all possible block locations for insertion and replacement of
- * an address. Should be called immediately before ReplacementPolicy's
- * findVictim() not to break cache resizing.
- * Returns blocks in all ways belonging to the set of the address.
- *
- * @param addr The addr to a find possible locations for.
- * @return The possible locations.
- */
- std::vector<ReplaceableEntry*> getPossibleLocations(const Addr addr) const
- override
- {
- std::vector<ReplaceableEntry*> locations;
- for (const auto& blk : sets[extractSet(addr)]) {
- locations.push_back(static_cast<ReplaceableEntry*>(blk));
- }
- return locations;
- }
-
public:
/** Convenience typedef. */
typedef BaseSetAssocParams Params;
return blk;
}
- /**
- * Find a block given set and way.
- *
- * @param set The set of the block.
- * @param way The way of the block.
- * @return The block.
- */
- ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;
-
/**
* Find replacement victim based on address. The list of evicted blocks
* only contains the victim.
CacheBlk* findVictim(Addr addr, const bool is_secure,
std::vector<CacheBlk*>& evict_blks) const override
{
- // Get possible locations for the victim block
- std::vector<ReplaceableEntry*> locations = getPossibleLocations(addr);
+ // Get possible entries to be victimized
+ const std::vector<ReplaceableEntry*> entries =
+ indexingPolicy->getPossibleEntries(addr);
// Choose replacement victim from replacement candidates
CacheBlk* victim = static_cast<CacheBlk*>(replacementPolicy->getVictim(
- std::vector<ReplaceableEntry*>(
- locations.begin(), locations.end())));
+ entries));
// There is only one eviction for this replacement
evict_blks.push_back(victim);
}
/**
- * Generate the tag from the given address.
- * @param addr The address to get the tag from.
- * @return The tag of the address.
- */
- Addr extractTag(Addr addr) const override
- {
- return (addr >> tagShift);
- }
-
- /**
- * Regenerate the block address from the tag and set.
+ * Regenerate the block address from the tag and indexing location.
*
* @param block The block.
* @return the block address.
*/
Addr regenerateBlkAddr(const CacheBlk* blk) const override
{
- const Addr set = blk->getSet() << setShift;
- return ((blk->tag << tagShift) | set);
+ return indexingPolicy->regenerateAddr(blk->tag, blk);
}
void forEachBlk(std::function<void(CacheBlk &)> visitor) override {
}
return false;
}
-
- private:
- /**
- * Calculate the set index from the address.
- *
- * @param addr The address to get the set from.
- * @return The set index of the address.
- */
- int extractSet(Addr addr) const
- {
- return ((addr >> setShift) & setMask);
- }
};
#endif //__MEM_CACHE_TAGS_BASE_SET_ASSOC_HH__
--- /dev/null
+# Copyright (c) 2018 Inria
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Daniel Carvalho
+
+from m5.params import *
+from m5.proxy import *
+from m5.SimObject import SimObject
+
+class BaseIndexingPolicy(SimObject):
+ type = 'BaseIndexingPolicy'
+ abstract = True
+ cxx_header = "mem/cache/tags/indexing_policies/base.hh"
+
+ # Get the size from the parent (cache)
+ size = Param.MemorySize(Parent.size, "capacity in bytes")
+
+ # Get the entry size from the parent (tags)
+ entry_size = Param.Int(Parent.entry_size, "entry size in bytes")
+
+ # Get the associativity
+ assoc = Param.Int(Parent.assoc, "associativity")
+
+class SetAssociative(BaseIndexingPolicy):
+ type = 'SetAssociative'
+ cxx_class = 'SetAssociative'
+ cxx_header = "mem/cache/tags/indexing_policies/set_associative.hh"
+
+class SkewedAssociative(BaseIndexingPolicy):
+ type = 'SkewedAssociative'
+ cxx_class = 'SkewedAssociative'
+ cxx_header = "mem/cache/tags/indexing_policies/skewed_associative.hh"
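+
+# Usage sketch (hypothetical example): a tag store could select a policy
+# explicitly, e.g.
+#   my_cache.tags.indexing_policy = SkewedAssociative()
+# size, entry_size and assoc are then resolved through the Parent proxies
+# above.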
--- /dev/null
+# -*- mode:python -*-
+
+# Copyright (c) 2018 Inria.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Daniel Carvalho
+
+Import('*')
+
+SimObject('IndexingPolicies.py')
+
+Source('base.cc')
+Source('set_associative.cc')
+Source('skewed_associative.cc')
--- /dev/null
+/*
+ * Copyright (c) 2018 Inria
+ * Copyright (c) 2012-2014,2017 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Daniel Carvalho
+ * Erik Hallnor
+ */
+
+/**
+ * @file
+ * Definitions of a common framework for indexing policies.
+ */
+
+#include "mem/cache/tags/indexing_policies/base.hh"
+
+#include <cstdlib>
+
+#include "base/intmath.hh"
+#include "base/logging.hh"
+#include "mem/cache/replacement_policies/base.hh"
+
+BaseIndexingPolicy::BaseIndexingPolicy(const Params *p)
+ : SimObject(p), assoc(p->assoc),
+ numSets(p->size / (p->entry_size * assoc)),
+ setShift(floorLog2(p->entry_size)), setMask(numSets - 1), sets(numSets),
+ tagShift(setShift + floorLog2(numSets))
+{
+ fatal_if(!isPowerOf2(numSets), "# of sets must be non-zero and a power " \
+ "of 2");
+ fatal_if(assoc <= 0, "associativity must be greater than zero");
+
+ // Make space for the entries
+ for (uint32_t i = 0; i < numSets; ++i) {
+ sets[i].resize(assoc);
+ }
+}
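+
+// Illustrative geometry (example values only): a 32 kB structure with
+// 64-byte entries and assoc = 4 yields numSets = 128, setShift = 6,
+// setMask = 0x7F and tagShift = 13.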
+
+ReplaceableEntry*
+BaseIndexingPolicy::getEntry(const uint32_t set, const uint32_t way) const
+{
+ return sets[set][way];
+}
+
+void
+BaseIndexingPolicy::setEntry(ReplaceableEntry* entry, const uint64_t index)
+{
+ // Calculate set and way from entry index
+ const std::lldiv_t div_result = std::div((long long)index, assoc);
+ const uint32_t set = div_result.quot;
+ const uint32_t way = div_result.rem;
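+ // e.g., with assoc = 4, index 10 falls in set 2, way 2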
+
+ // Sanity check
+ assert(set < numSets);
+
+ // Assign a free pointer
+ sets[set][way] = entry;
+
+ // Inform the entry of its position
+ entry->setPosition(set, way);
+}
+
+Addr
+BaseIndexingPolicy::extractTag(const Addr addr) const
+{
+ return (addr >> tagShift);
+}
--- /dev/null
+/*
+ * Copyright (c) 2018 Inria
+ * Copyright (c) 2012-2014,2017 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Daniel Carvalho
+ * Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declaration of a common framework for indexing policies.
+ */
+
+#ifndef __MEM_CACHE_INDEXING_POLICIES_BASE_HH__
+#define __MEM_CACHE_INDEXING_POLICIES_BASE_HH__
+
+#include <vector>
+
+#include "params/BaseIndexingPolicy.hh"
+#include "sim/sim_object.hh"
+
+class ReplaceableEntry;
+
+/**
+ * A common base class for indexing table locations. Classes that inherit
+ * from it determine hash functions that should be applied based on the set
+ * and way. These functions are then applied to re-map the original values.
+ * @sa \ref gem5MemorySystem "gem5 Memory System"
+ */
+class BaseIndexingPolicy : public SimObject
+{
+ protected:
+ /**
+ * The associativity.
+ */
+ const unsigned assoc;
+
+ /**
+ * The number of sets in the cache.
+ */
+ const uint32_t numSets;
+
+ /**
+ * The amount to shift the address to get the set.
+ */
+ const int setShift;
+
+ /**
+ * Mask out all bits that aren't part of the set index.
+ */
+ const unsigned setMask;
+
+ /**
+ * The cache sets.
+ */
+ std::vector<std::vector<ReplaceableEntry*>> sets;
+
+ /**
+ * The amount to shift the address to get the tag.
+ */
+ const int tagShift;
+
+ public:
+ /**
+ * Convenience typedef.
+ */
+ typedef BaseIndexingPolicyParams Params;
+
+ /**
+ * Construct and initialize this policy.
+ */
+ BaseIndexingPolicy(const Params *p);
+
+ /**
+ * Destructor.
+ */
+ ~BaseIndexingPolicy() {};
+
+ /**
+ * Associate a pointer to an entry to its physical counterpart.
+ *
+ * @param entry The entry pointer.
+ * @param index A unique index for the entry.
+ */
+ void setEntry(ReplaceableEntry* entry, const uint64_t index);
+
+ /**
+ * Get an entry based on its set and way. All entries must have been set
+ * already before calling this function.
+ *
+ * @param set The set of the desired entry.
+ * @param way The way of the desired entry.
+ * @return The entry pointer.
+ */
+ ReplaceableEntry* getEntry(const uint32_t set, const uint32_t way) const;
+
+ /**
+ * Generate the tag from the given address.
+ *
+ * @param addr The address to get the tag from.
+ * @return The tag of the address.
+ */
+ Addr extractTag(const Addr addr) const;
+
+ /**
+ * Find all possible entries for insertion and replacement of an address.
+ * Should be called immediately before ReplacementPolicy's findVictim(),
+ * so as not to break cache resizing.
+ *
+ * @param addr The address to find possible entries for.
+ * @return The possible entries.
+ */
+ virtual std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr)
+ const = 0;
+
+ /**
+ * Regenerate an entry's address from its tag and assigned indexing bits.
+ *
+ * @param tag The tag bits.
+ * @param entry The entry.
+ * @return the entry's original address.
+ */
+ virtual Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry)
+ const = 0;
+};
+
+#endif //__MEM_CACHE_INDEXING_POLICIES_BASE_HH__
--- /dev/null
+/*
+ * Copyright (c) 2018 Inria
+ * Copyright (c) 2012-2014,2017 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Daniel Carvalho
+ * Erik Hallnor
+ */
+
+/**
+ * @file
+ * Definitions of a set associative indexing policy.
+ */
+
+#include "mem/cache/tags/indexing_policies/set_associative.hh"
+
+#include "mem/cache/replacement_policies/base.hh"
+
+SetAssociative::SetAssociative(const Params *p)
+ : BaseIndexingPolicy(p)
+{
+}
+
+uint32_t
+SetAssociative::extractSet(const Addr addr) const
+{
+ return (addr >> setShift) & setMask;
+}
+
+Addr
+SetAssociative::regenerateAddr(const Addr tag, const ReplaceableEntry* entry)
+ const
+{
+ return (tag << tagShift) | (entry->getSet() << setShift);
+}
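+
+// Worked example (illustrative values): with 64-byte entries, assoc = 4 and a
+// 2 kB capacity, numSets = 8, setShift = 6 and tagShift = 9; address 0x12C0
+// then maps to set 3 with tag 9, and regenerateAddr(9, <entry in set 3>)
+// reconstructs (9 << 9) | (3 << 6) = 0x12C0.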
+
+std::vector<ReplaceableEntry*>
+SetAssociative::getPossibleEntries(const Addr addr) const
+{
+ return sets[extractSet(addr)];
+}
+
+SetAssociative*
+SetAssociativeParams::create()
+{
+ return new SetAssociative(this);
+}
--- /dev/null
+/*
+ * Copyright (c) 2018 Inria
+ * Copyright (c) 2012-2014,2017 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2003-2005,2014 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Daniel Carvalho
+ * Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declaration of a set associative indexing policy.
+ */
+
+#ifndef __MEM_CACHE_INDEXING_POLICIES_SET_ASSOCIATIVE_HH__
+#define __MEM_CACHE_INDEXING_POLICIES_SET_ASSOCIATIVE_HH__
+
+#include <vector>
+
+#include "mem/cache/tags/indexing_policies/base.hh"
+#include "params/SetAssociative.hh"
+
+class ReplaceableEntry;
+
+/**
+ * A set associative indexing policy.
+ * @sa \ref gem5MemorySystem "gem5 Memory System"
+ *
+ * The set associative indexing policy uses an identity mapping across ways,
+ * so an address is always mapped to the same set regardless of the way, that
+ * is, Hash(A, 0) = Hash(A, 1) = ... = Hash(A, N-1), where N is the number of
+ * ways.
+ *
+ * For example, let's assume address A maps to set 3 on way 0. This policy
+ * ensures that A also maps to set 3 on every other way. Visually, the
+ * possible locations of A are, for a table with 4 ways and 8 sets:
+ * Way 0 1 2 3
+ * Set _ _ _ _
+ * 0 |_| |_| |_| |_|
+ * 1 |_| |_| |_| |_|
+ * 2 |_| |_| |_| |_|
+ * 3 |X| |X| |X| |X|
+ * 4 |_| |_| |_| |_|
+ * 5 |_| |_| |_| |_|
+ * 6 |_| |_| |_| |_|
+ * 7 |_| |_| |_| |_|
+ */
+class SetAssociative : public BaseIndexingPolicy
+{
+ private:
+ /**
+ * Extract the set index from the address.
+ *
+ * @param addr The address to calculate the set for.
+ * @return The set index of the address.
+ */
+ uint32_t extractSet(const Addr addr) const;
+
+ public:
+ /**
+ * Convenience typedef.
+ */
+ typedef SetAssociativeParams Params;
+
+ /**
+ * Construct and initialize this policy.
+ */
+ SetAssociative(const Params *p);
+
+ /**
+ * Destructor.
+ */
+ ~SetAssociative() {};
+
+ /**
+ * Find all possible entries for insertion and replacement of an address.
+ * Should be called immediately before ReplacementPolicy's findVictim(),
+ * so as not to break cache resizing.
+ * Returns entries in all ways belonging to the set of the address.
+ *
+ * @param addr The address to find possible entries for.
+ * @return The possible entries.
+ */
+ std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr) const
+ override;
+
+ /**
+ * Regenerate an entry's address from its tag and assigned set and way.
+ *
+ * @param tag The tag bits.
+ * @param entry The entry.
+ * @return the entry's original address.
+ */
+ Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry) const
+ override;
+};
+
+#endif //__MEM_CACHE_INDEXING_POLICIES_SET_ASSOCIATIVE_HH__
--- /dev/null
+/*
+ * Copyright (c) 2018 Inria
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Daniel Carvalho
+ */
+
+/**
+ * @file
+ * Definitions of a skewed associative indexing policy.
+ */
+
+#include "mem/cache/tags/indexing_policies/skewed_associative.hh"
+
+#include "base/bitfield.hh"
+#include "base/intmath.hh"
+#include "base/logging.hh"
+#include "mem/cache/replacement_policies/base.hh"
+
+SkewedAssociative::SkewedAssociative(const Params *p)
+ : BaseIndexingPolicy(p), msbShift(floorLog2(numSets) - 1)
+{
+ if (assoc > NUM_SKEWING_FUNCTIONS) {
+ warn_once("Associativity higher than number of skewing functions. " \
+ "Expect sub-optimal skewing.\n");
+ }
+
+ // Check if set is too big to do skewing. If using big sets, rewrite
+ // skewing functions accordingly to make good use of the hashing function
+ panic_if(setShift + 2 * (msbShift + 1) > 64, "Unsupported number of bits " \
+ "for the skewing functions.");
+
+ // We must have more than two sets, otherwise the MSB and LSB are the same
+ // bit, and the xor of them will always be 0
+ fatal_if(numSets <= 2, "The number of sets must be greater than 2");
+}
+
+Addr
+SkewedAssociative::hash(const Addr addr) const
+{
+ // Get relevant bits
+ const uint8_t lsb = bits<Addr>(addr, 0);
+ const uint8_t msb = bits<Addr>(addr, msbShift);
+ const uint8_t xor_bit = msb ^ lsb;
+
+ // Shift-off LSB and set new MSB as xor of old LSB and MSB
+ return insertBits<Addr, uint8_t>(addr >> 1, msbShift, xor_bit);
+}
+
+Addr
+SkewedAssociative::dehash(const Addr addr) const
+{
+ // Get relevant bits. The original MSB is one position below the current
+ // MSB (which is the XOR bit). The original LSB can be retrieved by
+ // XOR-ing the XOR bit with the original MSB.
+ const uint8_t msb = bits<Addr>(addr, msbShift - 1);
+ const uint8_t xor_bit = bits<Addr>(addr, msbShift);
+ const uint8_t lsb = msb ^ xor_bit;
+
+ // Remove current MSB (XOR bit), shift left and add LSB back
+ const Addr addr_no_msb = mbits<Addr>(addr, msbShift - 1, 0);
+ return insertBits<Addr, uint8_t>(addr_no_msb << 1, 0, lsb);
+}
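+
+// Round-trip check (illustrative): with msbShift = 2 (8 sets), hash(0b101)
+// gives 0b010 (MSB 1 XOR LSB 1 = 0 becomes the new MSB after the right
+// shift), and dehash(0b010) recovers 0b101.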
+
+Addr
+SkewedAssociative::skew(const Addr addr, const uint32_t way) const
+{
+ // Assume an address of size A bits can be decomposed into
+ // {addr3, addr2, addr1, addr0}, where:
+ // addr0 (M bits) = Block offset;
+ // addr1 (N bits) = Set bits in conventional cache;
+ // addr3 (A - M - 2*N bits), addr2 (N bits) = Tag bits.
+ // We use addr1 and addr2, as proposed in the original paper
+ Addr addr1 = bits<Addr>(addr, msbShift, 0);
+ const Addr addr2 = bits<Addr>(addr, 2 * (msbShift + 1) - 1, msbShift + 1);
+
+ // Select and apply skewing function for given way
+ switch (way % NUM_SKEWING_FUNCTIONS) {
+ case 0:
+ addr1 = hash(addr1) ^ hash(addr2) ^ addr2;
+ break;
+ case 1:
+ addr1 = hash(addr1) ^ hash(addr2) ^ addr1;
+ break;
+ case 2:
+ addr1 = hash(addr1) ^ dehash(addr2) ^ addr2;
+ break;
+ case 3:
+ addr1 = hash(addr1) ^ dehash(addr2) ^ addr1;
+ break;
+ case 4:
+ addr1 = dehash(addr1) ^ hash(addr2) ^ addr2;
+ break;
+ case 5:
+ addr1 = dehash(addr1) ^ hash(addr2) ^ addr1;
+ break;
+ case 6:
+ addr1 = dehash(addr1) ^ dehash(addr2) ^ addr2;
+ break;
+ case 7:
+ addr1 = dehash(addr1) ^ dehash(addr2) ^ addr1;
+ break;
+ default:
+ panic("A skewing function has not been implemented for this way.");
+ }
+
+ // If we have more than 8 ways, just pile them up on hashes. This is not
+ // the optimal solution, and can be improved by adding more skewing
+ // functions to the previous selector
+ for (uint32_t i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
+ addr1 = hash(addr1);
+ }
+
+ return addr1;
+}
+
+Addr
+SkewedAssociative::deskew(const Addr addr, const uint32_t way) const
+{
+ // Get relevant bits of the addr
+ Addr addr1 = bits<Addr>(addr, msbShift, 0);
+ const Addr addr2 = bits<Addr>(addr, 2 * (msbShift + 1) - 1, msbShift + 1);
+
+ // If we have more than NUM_SKEWING_FUNCTIONS ways, unpile the hashes
+ if (way >= NUM_SKEWING_FUNCTIONS) {
+ for (uint32_t i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
+ addr1 = dehash(addr1);
+ }
+ }
+
+ // Select and apply skewing function for given way
+ switch (way % NUM_SKEWING_FUNCTIONS) {
+ case 0:
+ return dehash(addr1 ^ hash(addr2) ^ addr2);
+ case 1:
+ addr1 = addr1 ^ hash(addr2);
+ for (int i = 0; i < msbShift; i++) {
+ addr1 = hash(addr1);
+ }
+ return addr1;
+ case 2:
+ return dehash(addr1 ^ dehash(addr2) ^ addr2);
+ case 3:
+ addr1 = addr1 ^ dehash(addr2);
+ for (int i = 0; i < msbShift; i++) {
+ addr1 = hash(addr1);
+ }
+ return addr1;
+ case 4:
+ return hash(addr1 ^ hash(addr2) ^ addr2);
+ case 5:
+ addr1 = addr1 ^ hash(addr2);
+ for (int i = 0; i <= msbShift; i++) {
+ addr1 = hash(addr1);
+ }
+ return addr1;
+ case 6:
+ return hash(addr1 ^ dehash(addr2) ^ addr2);
+ case 7:
+ addr1 = addr1 ^ dehash(addr2);
+ for (int i = 0; i <= msbShift; i++) {
+ addr1 = hash(addr1);
+ }
+ return addr1;
+ default:
+ panic("A skewing function has not been implemented for this way.");
+ }
+}
+
+uint32_t
+SkewedAssociative::extractSet(const Addr addr, const uint32_t way) const
+{
+ return skew(addr >> setShift, way) & setMask;
+}
+
+Addr
+SkewedAssociative::regenerateAddr(const Addr tag,
+ const ReplaceableEntry* entry) const
+{
+ const Addr addr_set = (tag << (msbShift + 1)) | entry->getSet();
+ return (tag << tagShift) |
+ ((deskew(addr_set, entry->getWay()) & setMask) << setShift);
+}
+
+std::vector<ReplaceableEntry*>
+SkewedAssociative::getPossibleEntries(const Addr addr) const
+{
+ std::vector<ReplaceableEntry*> entries;
+
+ // Parse all ways
+ for (uint32_t way = 0; way < assoc; ++way) {
+ // Apply hash to get set, and get way entry in it
+ entries.push_back(sets[extractSet(addr, way)][way]);
+ }
+
+ return entries;
+}
+
+SkewedAssociative *
+SkewedAssociativeParams::create()
+{
+ return new SkewedAssociative(this);
+}
--- /dev/null
+/*
+ * Copyright (c) 2018 Inria
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Daniel Carvalho
+ */
+
+/**
+ * @file
+ * Declaration of a skewed associative indexing policy.
+ */
+
+#ifndef __MEM_CACHE_INDEXING_POLICIES_SKEWED_ASSOCIATIVE_HH__
+#define __MEM_CACHE_INDEXING_POLICIES_SKEWED_ASSOCIATIVE_HH__
+
+#include <vector>
+
+#include "mem/cache/tags/indexing_policies/base.hh"
+#include "params/SkewedAssociative.hh"
+
+class ReplaceableEntry;
+
+/**
+ * A skewed associative indexing policy.
+ * @sa \ref gem5MemorySystem "gem5 Memory System"
+ *
+ * The skewed indexing policy has a variable mapping based on a hash function,
+ * so an address can be mapped to different sets, depending on the way used.
+ *
+ * For example, let's assume address A maps to set 3 on way 0. It will likely
+ * have a different set for every other way. Visually, the possible locations
+ * of A are, for a table with 4 ways and 8 sets (arbitrarily chosen sets; these
+ * locations depend on A and the hashing function used):
+ * Way 0 1 2 3
+ * Set _ _ _ _
+ * 0 |_| |_| |X| |_|
+ * 1 |_| |_| |_| |X|
+ * 2 |_| |_| |_| |_|
+ * 3 |X| |_| |_| |_|
+ * 4 |_| |_| |_| |_|
+ * 5 |_| |X| |_| |_|
+ * 6 |_| |_| |_| |_|
+ * 7 |_| |_| |_| |_|
+ *
+ * If provided with an associativity higher than the number of skewing
+ * functions, the skewing functions of the extra ways might be sub-optimal.
+ */
+class SkewedAssociative : public BaseIndexingPolicy
+{
+ private:
+ /**
+ * The number of skewing functions implemented. Should be updated if more
+ * functions are added. If more than this number of skewing functions are
+ * needed (i.e., assoc > this value), we programmatically generate new ones,
+ * which may be sub-optimal.
+ */
+ const int NUM_SKEWING_FUNCTIONS = 8;
+
+ /**
+ * The amount to shift a set index to get its MSB.
+ */
+ const int msbShift;
+
+ /**
+ * The hash function itself. Uses the hash function H, as described in
+ * "Skewed-Associative Caches", from Seznec et al. (section 3.3): It
+ * applies an XOR to the MSB and LSB, shifts all bits one bit to the right,
+ * and sets the result of the XOR as the new MSB.
+ *
+ * This function is not bijective if the address has only 1 bit, as the MSB
+ * and LSB will be the same bit, and therefore the XOR will always be 0.
+ *
+ * @param addr The address to be hashed.
+ * @return The hashed address.
+ */
+ Addr hash(const Addr addr) const;
+
+ /**
+ * Inverse of the hash function.
+ * @sa hash().
+ *
+ * @param addr The address to be dehashed.
+ * @return The dehashed address.
+ */
+ Addr dehash(const Addr addr) const;
+
+ /**
+ * Address skewing function selection. It selects and applies one of the
+ * skewing functions based on the way provided.
+ *
+ * @param addr Address to be skewed. Should contain the set and tag bits.
+ * @param way The cache way, used to select a hash function.
+ * @return The skewed address.
+ */
+ Addr skew(const Addr addr, const uint32_t way) const;
+
+ /**
+ * Address deskewing function (inverse of the skew function) of the given
+ * way.
+ * @sa skew()
+ *
+ * @param addr Address to be deskewed. Should contain the set and tag bits.
+ * @param way The cache way, used to select a hash function.
+ * @return The deskewed address.
+ */
+ Addr deskew(const Addr addr, const uint32_t way) const;
+
+ /**
+ * Apply a skewing function to calculate the address's set given a way.
+ *
+ * @param addr The address to calculate the set for.
+ * @param way The way to get the set from.
+ * @return The set index for given combination of address and way.
+ */
+ uint32_t extractSet(const Addr addr, const uint32_t way) const;
+
+ public:
+ /** Convenience typedef. */
+ typedef SkewedAssociativeParams Params;
+
+ /**
+ * Construct and initialize this policy.
+ */
+ SkewedAssociative(const Params *p);
+
+ /**
+ * Destructor.
+ */
+ ~SkewedAssociative() {};
+
+ /**
+ * Find all possible entries for insertion and replacement of an address.
+ * Should be called immediately before ReplacementPolicy's findVictim(),
+ * so as not to break cache resizing.
+ *
+ * @param addr The address to find possible entries for.
+ * @return The possible entries.
+ */
+ std::vector<ReplaceableEntry*> getPossibleEntries(const Addr addr) const
+ override;
+
+ /**
+ * Regenerate an entry's address from its tag and assigned set and way.
+ * Uses the inverse of the skewing function.
+ *
+ * @param tag The tag bits.
+ * @param entry The entry.
+ * @return the entry's address.
+ */
+ Addr regenerateAddr(const Addr tag, const ReplaceableEntry* entry) const
+ override;
+};
+
+#endif //__MEM_CACHE_INDEXING_POLICIES_SKEWED_ASSOCIATIVE_HH__
/**
* @file
- * Definitions of a base set associative sector tag store.
+ * Definitions of a sector tag store.
*/
#include "mem/cache/tags/sector_tags.hh"
#include "debug/CacheRepl.hh"
#include "mem/cache/base.hh"
#include "mem/cache/replacement_policies/base.hh"
+#include "mem/cache/tags/indexing_policies/base.hh"
SectorTags::SectorTags(const SectorTagsParams *p)
- : BaseTags(p), assoc(p->assoc), allocAssoc(p->assoc),
+ : BaseTags(p), allocAssoc(p->assoc),
sequentialAccess(p->sequential_access),
replacementPolicy(p->replacement_policy),
numBlocksPerSector(p->num_blocks_per_sector),
- numSectors(numBlocks / p->num_blocks_per_sector),
- numSets(numSectors / p->assoc),
- blks(numBlocks), secBlks(numSectors), sets(numSets),
- sectorShift(floorLog2(blkSize)),
- setShift(sectorShift + floorLog2(numBlocksPerSector)),
- tagShift(setShift + floorLog2(numSets)),
- sectorMask(numBlocksPerSector - 1), setMask(numSets - 1)
+ numSectors(numBlocks / p->num_blocks_per_sector), blks(numBlocks),
+ secBlks(numSectors), sectorShift(floorLog2(blkSize)),
+ sectorMask(numBlocksPerSector - 1)
{
// Check parameters
fatal_if(blkSize < 4 || !isPowerOf2(blkSize),
"Block size must be at least 4 and a power of 2");
- fatal_if(!isPowerOf2(numSets),
- "# of sets must be non-zero and a power of 2");
fatal_if(!isPowerOf2(numBlocksPerSector),
"# of blocks per sector must be non-zero and a power of 2");
- fatal_if(assoc <= 0, "associativity must be greater than zero");
}
void
// Set parent cache
setCache(cache);
- // Initialize all sets
- unsigned sec_blk_index = 0; // index into sector blks array
+ // Initialize all blocks
unsigned blk_index = 0; // index into blks array
- for (unsigned i = 0; i < numSets; ++i) {
- sets[i].resize(assoc);
+ for (unsigned sec_blk_index = 0; sec_blk_index < numSectors;
+ sec_blk_index++)
+ {
+ // Locate next cache sector
+ SectorBlk* sec_blk = &secBlks[sec_blk_index];
- // Initialize all sectors in this set
- for (unsigned j = 0; j < assoc; ++j) {
- // Select block within the set to be linked
- SectorBlk*& sec_blk = sets[i][j];
-
- // Locate next cache sector
- sec_blk = &secBlks[sec_blk_index];
-
- // Associate a replacement data entry to the sector
- sec_blk->replacementData = replacementPolicy->instantiateEntry();
+ // Link block to indexing policy
+ indexingPolicy->setEntry(sec_blk, sec_blk_index);
- // Set its index
- sec_blk->setPosition(i, j);
+ // Associate a replacement data entry to the sector
+ sec_blk->replacementData = replacementPolicy->instantiateEntry();
- // Initialize all blocks in this sector
- sec_blk->blks.resize(numBlocksPerSector);
- for (unsigned k = 0; k < numBlocksPerSector; ++k){
- // Select block within the set to be linked
- SectorSubBlk*& blk = sec_blk->blks[k];
-
- // Locate next cache block
- blk = &blks[blk_index];
+ // Initialize all blocks in this sector
+ sec_blk->blks.resize(numBlocksPerSector);
+ for (unsigned k = 0; k < numBlocksPerSector; ++k){
+ // Select block within the sector to be linked
+ SectorSubBlk*& blk = sec_blk->blks[k];
- // Associate a data chunk to the block
- blk->data = &dataBlks[blkSize*blk_index];
+ // Locate next cache block
+ blk = &blks[blk_index];
- // Associate sector block to this block
- blk->setSectorBlock(sec_blk);
+ // Associate a data chunk to the block
+ blk->data = &dataBlks[blkSize*blk_index];
- // Associate the sector replacement data to this block
- blk->replacementData = sec_blk->replacementData;
+ // Associate sector block to this block
+ blk->setSectorBlock(sec_blk);
- // Set its index and sector offset
- blk->setPosition(i, j);
- blk->setSectorOffset(k);
+ // Associate the sector replacement data to this block
+ blk->replacementData = sec_blk->replacementData;
- // Update block index
- ++blk_index;
- }
+ // Set its sector offset
+ blk->setSectorOffset(k);
- // Update sector block index
- ++sec_blk_index;
+ // Update block index
+ ++blk_index;
}
}
}
return blk;
}
-std::vector<ReplaceableEntry*>
-SectorTags::getPossibleLocations(const Addr addr) const
-{
- std::vector<ReplaceableEntry*> locations;
- for (const auto& blk : sets[extractSet(addr)]) {
- locations.push_back(static_cast<ReplaceableEntry*>(blk));
- }
- return locations;
-}
-
void
SectorTags::insertBlock(const Addr addr, const bool is_secure,
const int src_master_ID, const uint32_t task_ID,
// due to sectors being composed of contiguous-address entries
const Addr offset = extractSectorOffset(addr);
- // Find all possible sector locations for the given address
- const std::vector<ReplaceableEntry*> locations =
- getPossibleLocations(addr);
+ // Find all possible sector entries that may contain the given address
+ const std::vector<ReplaceableEntry*> entries =
+ indexingPolicy->getPossibleEntries(addr);
// Search for block
- for (const auto& sector : locations) {
+ for (const auto& sector : entries) {
auto blk = static_cast<SectorBlk*>(sector)->blks[offset];
if (blk->getTag() == tag && blk->isValid() &&
blk->isSecure() == is_secure) {
return nullptr;
}
-ReplaceableEntry*
-SectorTags::findBlockBySetAndWay(int set, int way) const
-{
- return sets[set][way];
-}
-
CacheBlk*
SectorTags::findVictim(Addr addr, const bool is_secure,
std::vector<CacheBlk*>& evict_blks) const
{
- // Get all possible locations of this sector
- const std::vector<ReplaceableEntry*> sector_locations =
- getPossibleLocations(addr);
+ // Get possible entries to be victimized
+ const std::vector<ReplaceableEntry*> sector_entries =
+ indexingPolicy->getPossibleEntries(addr);
// Check if the sector this address belongs to has been allocated
Addr tag = extractTag(addr);
SectorBlk* victim_sector = nullptr;
- for (const auto& sector : sector_locations) {
+ for (const auto& sector : sector_entries) {
SectorBlk* sector_blk = static_cast<SectorBlk*>(sector);
if ((tag == sector_blk->getTag()) && sector_blk->isValid() &&
(is_secure == sector_blk->isSecure())){
if (victim_sector == nullptr){
// Choose replacement victim from replacement candidates
victim_sector = static_cast<SectorBlk*>(replacementPolicy->getVictim(
- sector_locations));
+ sector_entries));
}
- // Get the location of the victim block within the sector
+ // Get the entry of the victim block within the sector
SectorSubBlk* victim = victim_sector->blks[extractSectorOffset(addr)];
// Get evicted blocks. Blocks are only evicted if the sectors mismatch and
return victim;
}
-Addr
-SectorTags::extractTag(Addr addr) const
-{
- return addr >> tagShift;
-}
-
-int
-SectorTags::extractSet(Addr addr) const
-{
- return (addr >> setShift) & setMask;
-}
-
int
SectorTags::extractSectorOffset(Addr addr) const
{
SectorTags::regenerateBlkAddr(const CacheBlk* blk) const
{
const SectorSubBlk* blk_cast = static_cast<const SectorSubBlk*>(blk);
- const Addr set = blk_cast->getSectorBlock()->getSet() << setShift;
- return ((blk_cast->getTag() << tagShift) | set |
- ((Addr)blk_cast->getSectorOffset() << sectorShift));
+ const SectorBlk* sec_blk = blk_cast->getSectorBlock();
+ const Addr sec_addr = indexingPolicy->regenerateAddr(blk->tag, sec_blk);
+ return sec_addr | ((Addr)blk_cast->getSectorOffset() << sectorShift);
}
void
SectorTags *
SectorTagsParams::create()
{
+ // There must be an indexing policy
+ fatal_if(!indexing_policy, "An indexing policy is required");
+
return new SectorTags(this);
}
/**
* @file
- * Declaration of a sector set associative tag store.
+ * Declaration of a sector tag store.
*/
#ifndef __MEM_CACHE_TAGS_SECTOR_TAGS_HH__
class SectorTags : public BaseTags
{
protected:
- /** Typedef the set type used in this tag store. */
- typedef std::vector<SectorBlk*> SetType;
-
- /** The associativity of the cache. */
- const unsigned assoc;
/** The allocatable associativity of the cache (alloc mask). */
unsigned allocAssoc;
/** The number of sectors in the cache. */
const unsigned numSectors;
- /** The number of sets in the cache. */
- const unsigned numSets;
/** The cache blocks. */
std::vector<SectorSubBlk> blks;
/** The cache sector blocks. */
std::vector<SectorBlk> secBlks;
- /** The cache sets. */
- std::vector<SetType> sets;
- // Organization of an address: Tag | Set # | Sector Offset # | Offset #
+ // Organization of an address:
+ // Tag | Placement Location | Sector Offset # | Offset #
/** The amount to shift the address to get the sector tag. */
const int sectorShift;
- /** The amount to shift the address to get the set. */
- const int setShift;
- /** The amount to shift the address to get the tag. */
- const int tagShift;
/** Mask out all bits that aren't part of the sector tag. */
const unsigned sectorMask;
- /** Mask out all bits that aren't part of the set index. */
- const unsigned setMask;
-
- /**
- * Find all possible block locations for insertion and replacement of
- * an address. Should be called immediately before ReplacementPolicy's
- * findVictim() not to break cache resizing.
- * Returns sector blocks in all ways belonging to the set of the address.
- *
- * @param addr The addr to a find possible locations for.
- * @return The possible locations.
- */
- std::vector<ReplaceableEntry*> getPossibleLocations(const Addr addr) const
- override;
public:
/** Convenience typedef. */
*/
CacheBlk* findBlock(Addr addr, bool is_secure) const override;
- /**
- * Find a sector block given set and way.
- *
- * @param set The set of the block.
- * @param way The way of the block.
- * @return The block.
- */
- ReplaceableEntry* findBlockBySetAndWay(int set, int way) const override;
-
/**
* Find replacement victim based on address.
*
CacheBlk* findVictim(Addr addr, const bool is_secure,
std::vector<CacheBlk*>& evict_blks) const override;
- /**
- * Generate the sector tag from the given address.
- *
- * @param addr The address to get the sector tag from.
- * @return The sector tag of the address.
- */
- Addr extractTag(Addr addr) const override;
-
- /**
- * Calculate the set index from the address.
- *
- * @param addr The address to get the set from.
- * @return The set index of the address.
- */
- int extractSet(Addr addr) const;
-
/**
* Calculate a block's offset in a sector from the address.
*
int extractSectorOffset(Addr addr) const;
/**
- * Regenerate the block address from the tag and set.
+ * Regenerate the block address from the tag and location.
*
* @param block The block.
* @return the block address.
+++ /dev/null
-/*
- * Copyright (c) 2018 Inria
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Daniel Carvalho
- */
-
-/**
- * @file
- * Definitions of a skewed associative placement policy.
- */
-
-#include "mem/cache/tags/skewed_assoc.hh"
-
-#include <vector>
-
-#include "base/bitfield.hh"
-#include "base/logging.hh"
-
-SkewedAssoc::SkewedAssoc(const Params *p)
- : BaseSetAssoc(p), msbShift(floorLog2(numSets) - 1)
-{
- if (assoc > NUM_SKEWING_FUNCTIONS) {
- warn_once("Associativity higher than number of skewing functions. " \
- "Expect sub-optimal skewing.\n");
- }
-
- // Check if set is too big to do skewing. If using big sets, rewrite
- // skewing functions accordingly to make good use of the hashing function
- panic_if(setShift + 2 * (msbShift + 1) > 64, "Unsupported number of bits " \
- "for the skewing functions.");
-
- // We must have more than two sets, otherwise the MSB and LSB are the same
- // bit, and the xor of them will always be 0
- fatal_if(numSets <= 2, "The number of sets must be greater than 2");
-}
-
-Addr
-SkewedAssoc::hash(const Addr addr) const
-{
- // Get relevant bits
- const uint8_t lsb = bits<Addr>(addr, 0);
- const uint8_t msb = bits<Addr>(addr, msbShift);
- const uint8_t xor_bit = msb ^ lsb;
-
- // Shift-off LSB and set new MSB as xor of old LSB and MSB
- return insertBits<Addr, uint8_t>(addr >> 1, msbShift, xor_bit);
-}
-
-Addr
-SkewedAssoc::dehash(const Addr addr) const
-{
- // Get relevant bits. The original MSB is one bit below the current MSB
- // (which is the XOR bit). The original LSB can be retrieved by inverting
- // the xor that generated the XOR bit.
- const uint8_t msb = bits<Addr>(addr, msbShift - 1);
- const uint8_t xor_bit = bits<Addr>(addr, msbShift);
- const uint8_t lsb = msb ^ xor_bit;
-
- // Remove current MSB (XOR bit), shift left and add LSB back
- const Addr addr_no_msb = mbits<Addr>(addr, msbShift - 1, 0);
- return insertBits<Addr, uint8_t>(addr_no_msb << 1, 0, lsb);
-}
-
-Addr
-SkewedAssoc::skew(const Addr addr, const unsigned way) const
-{
- // Assume an address of size A bits can be decomposed into
- // {addr3, addr2, addr1, addr0}, where:
- // addr0 (M bits) = Block offset;
- // addr1 (N bits) = Set bits in conventional cache;
- // addr3 (A - M - 2*N bits), addr2 (N bits) = Tag bits.
- // We use addr1 and addr2, as proposed in the original paper
- Addr addr1 = bits<Addr>(addr, msbShift, 0);
- const Addr addr2 = bits<Addr>(addr, 2 * (msbShift + 1) - 1, msbShift + 1);
-
- // Select and apply skewing function for given way
- switch (way % NUM_SKEWING_FUNCTIONS) {
- case 0:
- addr1 = hash(addr1) ^ hash(addr2) ^ addr2;
- break;
- case 1:
- addr1 = hash(addr1) ^ hash(addr2) ^ addr1;
- break;
- case 2:
- addr1 = hash(addr1) ^ dehash(addr2) ^ addr2;
- break;
- case 3:
- addr1 = hash(addr1) ^ dehash(addr2) ^ addr1;
- break;
- case 4:
- addr1 = dehash(addr1) ^ hash(addr2) ^ addr2;
- break;
- case 5:
- addr1 = dehash(addr1) ^ hash(addr2) ^ addr1;
- break;
- case 6:
- addr1 = dehash(addr1) ^ dehash(addr2) ^ addr2;
- break;
- case 7:
- addr1 = dehash(addr1) ^ dehash(addr2) ^ addr1;
- break;
- default:
- panic("A skewing function has not been implemented for this way.");
- }
-
- // If we have more than 8 ways, just pile them up on hashes. This is not
- // the optimal solution, and can be improved by adding more skewing
- // functions to the previous selector
- for (int i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
- addr1 = hash(addr1);
- }
-
- return addr1;
-}
-
-Addr
-SkewedAssoc::deskew(const Addr addr, const unsigned way) const
-{
- // Get relevant bits of the addr
- Addr addr1 = bits<Addr>(addr, msbShift, 0);
- const Addr addr2 = bits<Addr>(addr, 2 * (msbShift + 1) - 1, msbShift + 1);
-
- // If we have more than NUM_SKEWING_FUNCTIONS ways, unpile the hashes
- if (way >= NUM_SKEWING_FUNCTIONS) {
- for (int i = 0; i < way/NUM_SKEWING_FUNCTIONS; i++) {
- addr1 = dehash(addr1);
- }
- }
-
- // Select and apply skewing function for given way
- switch (way % 8) {
- case 0:
- return dehash(addr1 ^ hash(addr2) ^ addr2);
- case 1:
- addr1 = addr1 ^ hash(addr2);
- for (int i = 0; i < msbShift; i++) {
- addr1 = hash(addr1);
- }
- return addr1;
- case 2:
- return dehash(addr1 ^ dehash(addr2) ^ addr2);
- case 3:
- addr1 = addr1 ^ dehash(addr2);
- for (int i = 0; i < msbShift; i++) {
- addr1 = hash(addr1);
- }
- return addr1;
- case 4:
- return hash(addr1 ^ hash(addr2) ^ addr2);
- case 5:
- addr1 = addr1 ^ hash(addr2);
- for (int i = 0; i <= msbShift; i++) {
- addr1 = hash(addr1);
- }
- return addr1;
- case 6:
- return hash(addr1 ^ dehash(addr2) ^ addr2);
- case 7:
- addr1 = addr1 ^ dehash(addr2);
- for (int i = 0; i <= msbShift; i++) {
- addr1 = hash(addr1);
- }
- return addr1;
- default:
- panic("A skewing function has not been implemented for this way.");
- }
-}
-
-unsigned
-SkewedAssoc::extractSet(Addr addr, unsigned way) const
-{
- return skew(addr >> setShift, way) & setMask;
-}
-
-Addr
-SkewedAssoc::regenerateBlkAddr(const CacheBlk* blk) const
-{
- const Addr addr = (blk->tag << (msbShift + 1)) | blk->getSet();
- const Addr set = deskew(addr, blk->getWay()) & setMask;
- return (blk->tag << tagShift) | (set << setShift);
-}
-
-std::vector<ReplaceableEntry*>
-SkewedAssoc::getPossibleLocations(const Addr addr) const
-{
- std::vector<ReplaceableEntry*> locations;
-
- // Parse all ways
- for (int way = 0; way < assoc; ++way) {
- // Apply hash to get set, and get way entry in it
- locations.push_back(sets[extractSet(addr, way)][way]);
- }
-
- return locations;
-}
-
-CacheBlk*
-SkewedAssoc::findBlock(Addr addr, bool is_secure) const
-{
- // Extract block tag
- Addr tag = extractTag(addr);
-
- // Find possible locations for the given address
- std::vector<ReplaceableEntry*> locations = getPossibleLocations(addr);
-
- // Search for block
- for (const auto& location : locations) {
- CacheBlk* blk = static_cast<CacheBlk*>(location);
- if ((blk->tag == tag) && blk->isValid() &&
- (blk->isSecure() == is_secure)) {
- return blk;
- }
- }
-
- // Did not find block
- return nullptr;
-}
-
-SkewedAssoc *
-SkewedAssocParams::create()
-{
- return new SkewedAssoc(this);
-}
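The loop in getPossibleLocations() above is the part the new indexing-policy
interface generalizes: a skewed placement yields one candidate per way, each in a
potentially different set. A minimal sketch of that shape (illustrative only; the
per-way set function here is a hypothetical stand-in, not Seznec's skewing
functions, and the names do not correspond to gem5 classes):

// Illustrative sketch only: one candidate per way, each from that way's own set.
#include <cstdint>
#include <vector>

using Addr = uint64_t;
struct Entry {};                            // stand-in for ReplaceableEntry

struct SkewedPlacementSketch
{
    unsigned assoc;
    unsigned numSets;
    std::vector<std::vector<Entry*>> sets;  // sets[set][way]

    // Hypothetical per-way set function; the real policy mixes set and tag
    // bits with the H/H^-1 hashes from the file deleted above.
    unsigned
    skewSet(Addr addr, unsigned way) const
    {
        return (unsigned)((addr ^ ((addr >> 7) * (way + 1))) % numSets);
    }

    // Collect one candidate per way, each taken from that way's own set
    std::vector<Entry*>
    getPossibleEntries(Addr addr) const
    {
        std::vector<Entry*> candidates;
        for (unsigned way = 0; way < assoc; ++way)
            candidates.push_back(sets[skewSet(addr, way)][way]);
        return candidates;
    }
};

int main()
{
    SkewedPlacementSketch p{4, 16, {}};
    p.sets.assign(p.numSets, std::vector<Entry*>(p.assoc, nullptr));
    auto c = p.getPossibleEntries(0xabcd);
    return c.size() == p.assoc ? 0 : 1;     // one candidate per way
}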
+++ /dev/null
-/*
- * Copyright (c) 2018 Inria
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Daniel Carvalho
- */
-
-/**
- * @file
- * Declaration of a skewed associative tag store.
- */
-
-#ifndef __MEM_CACHE_TAGS_SKEWED_ASSOC_HH__
-#define __MEM_CACHE_TAGS_SKEWED_ASSOC_HH__
-
-#include "mem/cache/blk.hh"
-#include "mem/cache/tags/base_set_assoc.hh"
-#include "params/SkewedAssoc.hh"
-
-/**
- * A SkewedAssoc cache tag store.
- * @sa \ref gem5MemorySystem "gem5 Memory System"
- *
- * The SkewedAssoc placement policy divides the cache into s sets of w
- * cache lines (ways). A cache line has a different set mapping for each way,
- * according to a skewing function.
- *
- * If provided with an associativity higher than the number of skewing
- * functions, the skewing functions of the extra ways might be sub-optimal.
- */
-class SkewedAssoc : public BaseSetAssoc
-{
- private:
- /**
- * The number of skewing functions implemented. Should be updated if more
- * functions are added. If more than this number of skewing functions are
- * needed (i.e., assoc > this value), we programmatically generate new ones,
- * which may be sub-optimal.
- */
- const int NUM_SKEWING_FUNCTIONS = 8;
-
- /**
- * The amount to shift a set index to get its MSB.
- */
- const int msbShift;
-
- /**
- * The hash function itself. Uses the hash function H, as described in
- * "Skewed-Associative Caches", from Seznec et al. (section 3.3): It
- * applies an XOR to the MSB and LSB, shifts all bits one bit to the right,
- * and sets the result of the XOR as the new MSB.
- *
- * This function is not bijective if the address has only 1 bit, as the MSB
- * and LSB will be the same, and therefore the xor will always be 0.
- *
- * @param addr The address to be hashed.
- * @return The hashed address.
- */
- Addr hash(const Addr addr) const;
-
- /**
- * Inverse of the hash function.
- * @sa hash().
- *
- * @param addr The address to be dehashed.
- * @return The dehashed address.
- */
- Addr dehash(const Addr addr) const;
-
- /**
- * Address skewing function selection. It selects and applies one of the
- * skewing functions based on the way provided.
- *
- * @param addr Address to be skewed. Should contain the set and tag bits.
- * @param way The cache way, used to select a hash function.
- * @return The skewed address.
- */
- Addr skew(const Addr addr, const unsigned way) const;
-
- /**
- * Address deskewing function (inverse of the skew function) of the given
- * way.
- * @sa skew()
- *
- * @param addr Address to be deskewed. Should contain the set and tag bits.
- * @param way The cache way, used to select a hash function.
- * @return The deskewed address.
- */
- Addr deskew(const Addr addr, const unsigned way) const;
-
- /**
- * Apply a skewing function to calculate address' set given a way.
- *
- * @param addr The address to calculate the set for.
- * @param way The way to get the set from.
- * @return The set index for given combination of address and way.
- */
- unsigned extractSet(Addr addr, unsigned way) const;
-
- protected:
- /**
- * Find all possible block locations for insertion and replacement of
- * an address. Should be called immediately before ReplacementPolicy's
- * findVictim() not to break cache resizing.
- * Returns blocks in all ways belonging to the set of the address.
- *
- * @param addr The addr to find possible locations for.
- * @return The possible locations.
- */
- std::vector<ReplaceableEntry*> getPossibleLocations(const Addr addr) const
- override;
-
- public:
- /** Convenience typedef. */
- typedef SkewedAssocParams Params;
-
- /**
- * Construct and initialize this tag store.
- */
- SkewedAssoc(const Params *p);
-
- /**
- * Destructor
- */
- ~SkewedAssoc() {};
-
- /**
- * Finds the given address in the cache without updating the replacement
- * data, i.e., this is a no-side-effect find of a block.
- *
- * @param addr The address to find.
- * @param is_secure True if the target memory space is secure.
- * @return Pointer to the cache block if found.
- */
- CacheBlk* findBlock(Addr addr, bool is_secure) const override;
-
- /**
- * Regenerate the block address from the tag, set and way. Uses the
- * inverse of the skewing function.
- *
- * @param block The block.
- * @return the block address.
- */
- Addr regenerateBlkAddr(const CacheBlk* blk) const override;
-};
-
-#endif //__MEM_CACHE_TAGS_SKEWED_ASSOC_HH__
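For reference, the H hash described in the deleted header is small enough to check
exhaustively. Below is a standalone sketch (illustrative only; the set-index width
is assumed to be 6 bits, i.e. 64 sets, and plain shifts/masks replace gem5's
insertBits/mbits helpers) of the hash and its inverse, verifying that they
round-trip.

#include <cassert>
#include <cstdint>

constexpr int kBits = 6;                         // assumed set-index width (64 sets)
constexpr uint64_t kMask = (1ULL << kBits) - 1;

// H: shift right by one and insert (old MSB xor old LSB) as the new MSB
uint64_t hashH(uint64_t v)
{
    const uint64_t lsb = v & 1;
    const uint64_t msb = (v >> (kBits - 1)) & 1;
    return ((v >> 1) | ((msb ^ lsb) << (kBits - 1))) & kMask;
}

// Inverse of H: the old MSB now sits one bit below the top (the XOR bit)
uint64_t dehashH(uint64_t v)
{
    const uint64_t xorBit = (v >> (kBits - 1)) & 1;
    const uint64_t msb = (v >> (kBits - 2)) & 1;
    const uint64_t lsb = msb ^ xorBit;           // undo the XOR to recover the old LSB
    const uint64_t noMsb = v & ((1ULL << (kBits - 1)) - 1);
    return ((noMsb << 1) | lsb) & kMask;
}

int main()
{
    // H must be a bijection on kBits-bit values (requires kBits > 1)
    for (uint64_t v = 0; v <= kMask; ++v)
        assert(dehashH(hashH(v)) == v);
    return 0;
}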