BaseCache *
BaseCacheParams::create()
{
- unsigned numSets = size / (assoc * system->cacheLineSize());
-
assert(tags);
- if (dynamic_cast<FALRU*>(tags)) {
- if (numSets != 1)
- fatal("Got FALRU tags with more than one set\n");
- return new Cache<FALRU>(this);
- } else if (dynamic_cast<LRU*>(tags)) {
- if (numSets == 1)
- warn("Consider using FALRU tags for a fully associative cache\n");
- return new Cache<LRU>(this);
- } else if (dynamic_cast<RandomRepl*>(tags)) {
- return new Cache<RandomRepl>(this);
- } else {
- fatal("No suitable tags selected\n");
- }
+ return new Cache(this);
}
};
/**
- * Wrap a method and present it as a cache block visitor.
- *
- * For example the forEachBlk method in the tag arrays expects a
- * callable object/function as their parameter. This class wraps a
- * method in an object and presents callable object that adheres to
- * the cache block visitor protocol.
- */
-template <typename T, typename BlkType>
-class CacheBlkVisitorWrapper
-{
- public:
- typedef bool (T::*visitorPtr)(BlkType &blk);
-
- CacheBlkVisitorWrapper(T &_obj, visitorPtr _visitor)
- : obj(_obj), visitor(_visitor) {}
-
- bool operator()(BlkType &blk) {
- return (obj.*visitor)(blk);
- }
-
- private:
- T &obj;
- visitorPtr visitor;
-};
-
-/**
- * Cache block visitor that determines if there are dirty blocks in a
- * cache.
- *
- * Use with the forEachBlk method in the tag array to determine if the
- * array contains dirty blocks.
+ * Base class for cache block visitor, operating on the cache block
+ * base class (later subclassed for the various tag classes). This
+ * visitor class is used as part of the forEachBlk interface in the
+ * tag classes.
*/
-template <typename BlkType>
-class CacheBlkIsDirtyVisitor
+class CacheBlkVisitor
{
public:
- CacheBlkIsDirtyVisitor()
- : _isDirty(false) {}
-
- bool operator()(BlkType &blk) {
- if (blk.isDirty()) {
- _isDirty = true;
- return false;
- } else {
- return true;
- }
- }
- /**
- * Does the array contain a dirty line?
- *
- * \return true if yes, false otherwise.
- */
- bool isDirty() const { return _isDirty; };
+ CacheBlkVisitor() {}
+ virtual ~CacheBlkVisitor() {}
- private:
- bool _isDirty;
+ virtual bool operator()(CacheBlk &blk) = 0;
};
#endif //__CACHE_BLK_HH__
#include "mem/cache/tags/random_repl.hh"
#include "mem/cache/cache_impl.hh"
-// Template Instantiations
-#ifndef DOXYGEN_SHOULD_SKIP_THIS
-
-template class Cache<FALRU>;
-template class Cache<LRU>;
-template class Cache<RandomRepl>;
-
-#endif //DOXYGEN_SHOULD_SKIP_THIS
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
+#include "mem/cache/tags/base.hh"
#include "sim/eventq.hh"
//Forward decleration
* supplying different template policies. TagStore handles all tag and data
* storage @sa TagStore, \ref gem5MemorySystem "gem5 Memory System"
*/
-template <class TagStore>
class Cache : public BaseCache
{
public:
- /** Define the type of cache block to use. */
- typedef typename TagStore::BlkType BlkType;
- /** A typedef for a list of BlkType pointers. */
- typedef typename TagStore::BlkList BlkList;
+
+ /** A typedef for a list of CacheBlk pointers. */
+ typedef std::list<CacheBlk*> BlkList;
protected:
- typedef CacheBlkVisitorWrapper<Cache<TagStore>, BlkType> WrappedBlkVisitor;
/**
* The CPU-side port extends the base cache slave port with access
private:
// a pointer to our specific cache implementation
- Cache<TagStore> *cache;
+ Cache *cache;
protected:
public:
- CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
+ CpuSidePort(const std::string &_name, Cache *_cache,
const std::string &_label);
};
protected:
- Cache<TagStore> &cache;
+ Cache &cache;
SnoopRespPacketQueue &snoopRespQueue;
public:
- CacheReqPacketQueue(Cache<TagStore> &cache, MasterPort &port,
+ CacheReqPacketQueue(Cache &cache, MasterPort &port,
SnoopRespPacketQueue &snoop_resp_queue,
const std::string &label) :
ReqPacketQueue(cache, port, label), cache(cache),
SnoopRespPacketQueue _snoopRespQueue;
// a pointer to our specific cache implementation
- Cache<TagStore> *cache;
+ Cache *cache;
protected:
public:
- MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
+ MemSidePort(const std::string &_name, Cache *_cache,
const std::string &_label);
};
/** Tag and data Storage */
- TagStore *tags;
+ BaseTags *tags;
/** Prefetcher */
BasePrefetcher *prefetcher;
/** Temporary cache block for occasional transitory use */
- BlkType *tempBlock;
+ CacheBlk *tempBlock;
/**
* This cache should allocate a block on a line-sized write miss.
* @param writebacks List for any writebacks that need to be performed.
* @return Boolean indicating whether the request was satisfied.
*/
- bool access(PacketPtr pkt, BlkType *&blk,
+ bool access(PacketPtr pkt, CacheBlk *&blk,
Cycles &lat, PacketList &writebacks);
/**
*Handle doing the Compare and Swap function for SPARC.
*/
- void cmpAndSwap(BlkType *blk, PacketPtr pkt);
+ void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
/**
* Find a block frame for new block at address addr targeting the
* list. Return free block frame. May return NULL if there are
* no replaceable blocks at the moment.
*/
- BlkType *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
+ CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
/**
* Populates a cache block and handles all outstanding requests for the
* @param writebacks List for any writebacks that need to be performed.
* @return Pointer to the new cache block.
*/
- BlkType *handleFill(PacketPtr pkt, BlkType *blk,
+ CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
PacketList &writebacks);
*/
void functionalAccess(PacketPtr pkt, bool fromCpuSide);
- void satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
+ void satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
bool deferred_response = false,
bool pending_downgrade = false);
- bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, BlkType *blk);
+ bool satisfyMSHR(MSHR *mshr, PacketPtr pkt, CacheBlk *blk);
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
bool already_copied, bool pending_inval);
* @param blk The cache block being snooped.
* @param new_state The new coherence state for the block.
*/
- void handleSnoop(PacketPtr ptk, BlkType *blk,
+ void handleSnoop(PacketPtr ptk, CacheBlk *blk,
bool is_timing, bool is_deferred, bool pending_inval);
/**
* @param blk The block to writeback.
* @return The writeback request for the block.
*/
- PacketPtr writebackBlk(BlkType *blk);
+ PacketPtr writebackBlk(CacheBlk *blk);
void memWriteback();
*
* \return Always returns true.
*/
- bool writebackVisitor(BlkType &blk);
+ bool writebackVisitor(CacheBlk &blk);
/**
* Cache block visitor that invalidates all blocks in the cache.
*
*
* \return Always returns true.
*/
- bool invalidateVisitor(BlkType &blk);
+ bool invalidateVisitor(CacheBlk &blk);
/**
* Squash all requests associated with specified thread.
* @return A new Packet containing the request, or NULL if the
* current request in cpu_pkt should just be forwarded on.
*/
- PacketPtr getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
+ PacketPtr getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
bool needsExclusive) const;
/**
void unserialize(Checkpoint *cp, const std::string &section);
};
+/**
+ * Wrap a method and present it as a cache block visitor.
+ *
+ * For example, the forEachBlk method in the tag arrays expects a
+ * callable object/function as its parameter. This class wraps a
+ * method in an object and presents a callable object that adheres to
+ * the cache block visitor protocol.
+ */
+class CacheBlkVisitorWrapper : public CacheBlkVisitor
+{
+ public:
+ typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);
+
+ CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
+ : cache(_cache), visitor(_visitor) {}
+
+ bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE {
+ return (cache.*visitor)(blk);
+ }
+
+ private:
+ Cache &cache;
+ VisitorPtr visitor;
+};
+
+/**
+ * Cache block visitor that determines if there are dirty blocks in a
+ * cache.
+ *
+ * Use with the forEachBlk method in the tag array to determine if the
+ * array contains dirty blocks.
+ */
+class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
+{
+ public:
+ CacheBlkIsDirtyVisitor()
+ : _isDirty(false) {}
+
+ bool operator()(CacheBlk &blk) M5_ATTR_OVERRIDE {
+ if (blk.isDirty()) {
+ _isDirty = true;
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ /**
+ * Does the array contain a dirty line?
+ *
+ * \return true if yes, false otherwise.
+ */
+ bool isDirty() const { return _isDirty; };
+
+ private:
+ bool _isDirty;
+};
+
#endif // __CACHE_HH__
#include "mem/cache/mshr.hh"
#include "sim/sim_exit.hh"
-template<class TagStore>
-Cache<TagStore>::Cache(const Params *p)
+Cache::Cache(const Params *p)
: BaseCache(p),
- tags(dynamic_cast<TagStore*>(p->tags)),
+ tags(p->tags),
prefetcher(p->prefetcher),
doFastWrites(true),
prefetchOnAccess(p->prefetch_on_access)
{
- tempBlock = new BlkType();
+ tempBlock = new CacheBlk();
tempBlock->data = new uint8_t[blkSize];
cpuSidePort = new CpuSidePort(p->name + ".cpu_side", this,
prefetcher->setCache(this);
}
-template<class TagStore>
-Cache<TagStore>::~Cache()
+Cache::~Cache()
{
delete [] tempBlock->data;
delete tempBlock;
delete memSidePort;
}
-template<class TagStore>
void
-Cache<TagStore>::regStats()
+Cache::regStats()
{
BaseCache::regStats();
}
-template<class TagStore>
void
-Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
+Cache::cmpAndSwap(CacheBlk *blk, PacketPtr pkt)
{
assert(pkt->isRequest());
}
-template<class TagStore>
void
-Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
- bool deferred_response,
- bool pending_downgrade)
+Cache::satisfyCpuSideRequest(PacketPtr pkt, CacheBlk *blk,
+ bool deferred_response, bool pending_downgrade)
{
assert(pkt->isRequest());
/////////////////////////////////////////////////////
-template<class TagStore>
void
-Cache<TagStore>::markInService(MSHR *mshr, bool pending_dirty_resp)
+Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
{
markInServiceInternal(mshr, pending_dirty_resp);
#if 0
}
-template<class TagStore>
void
-Cache<TagStore>::squash(int threadNum)
+Cache::squash(int threadNum)
{
bool unblock = false;
BlockedCause cause = NUM_BLOCKED_CAUSES;
//
/////////////////////////////////////////////////////
-template<class TagStore>
bool
-Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
- Cycles &lat, PacketList &writebacks)
+Cache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
+ PacketList &writebacks)
{
// sanity check
assert(pkt->isRequest());
tags->clearLocks();
// flush and invalidate any existing block
- BlkType *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
+ CacheBlk *old_blk(tags->findBlock(pkt->getAddr(), pkt->isSecure()));
if (old_blk && old_blk->isValid()) {
if (old_blk->isDirty())
writebacks.push_back(writebackBlk(old_blk));
ForwardResponseRecord() {}
};
-template<class TagStore>
void
-Cache<TagStore>::recvTimingSnoopResp(PacketPtr pkt)
+Cache::recvTimingSnoopResp(PacketPtr pkt)
{
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
memSidePort->schedTimingSnoopResp(pkt, snoop_resp_time);
}
-template<class TagStore>
void
-Cache<TagStore>::promoteWholeLineWrites(PacketPtr pkt)
+Cache::promoteWholeLineWrites(PacketPtr pkt)
{
// Cache line clearing instructions
if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
}
}
-template<class TagStore>
bool
-Cache<TagStore>::recvTimingReq(PacketPtr pkt)
+Cache::recvTimingReq(PacketPtr pkt)
{
DPRINTF(CacheTags, "%s tags: %s\n", __func__, tags->print());
//@todo Add back in MemDebug Calls
// We use lookupLatency here because it is used to specify the latency
// to access.
Cycles lat = lookupLatency;
- BlkType *blk = NULL;
+ CacheBlk *blk = NULL;
bool satisfied = false;
{
PacketList writebacks;
// See comment in cache.hh.
-template<class TagStore>
PacketPtr
-Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
- bool needsExclusive) const
+Cache::getBusPacket(PacketPtr cpu_pkt, CacheBlk *blk,
+ bool needsExclusive) const
{
bool blkValid = blk && blk->isValid();
}
-template<class TagStore>
Tick
-Cache<TagStore>::recvAtomic(PacketPtr pkt)
+Cache::recvAtomic(PacketPtr pkt)
{
// We are in atomic mode so we pay just for lookupLatency here.
Cycles lat = lookupLatency;
// have to invalidate ourselves and any lower caches even if
// upper cache will be responding
if (pkt->isInvalidate()) {
- BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
if (blk && blk->isValid()) {
tags->invalidate(blk);
blk->invalidate();
// writebacks... that would mean that someone used an atomic
// access in timing mode
- BlkType *blk = NULL;
+ CacheBlk *blk = NULL;
PacketList writebacks;
bool satisfied = access(pkt, blk, lat, writebacks);
}
-template<class TagStore>
void
-Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
+Cache::functionalAccess(PacketPtr pkt, bool fromCpuSide)
{
if (system->bypassCaches()) {
// Packets from the memory side are snoop request and
Addr blk_addr = blockAlign(pkt->getAddr());
bool is_secure = pkt->isSecure();
- BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure);
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
pkt->pushLabel(name());
/////////////////////////////////////////////////////
-template<class TagStore>
void
-Cache<TagStore>::recvTimingResp(PacketPtr pkt)
+Cache::recvTimingResp(PacketPtr pkt)
{
assert(pkt->isResponse());
// Initial target is used just for stats
MSHR::Target *initial_tgt = mshr->getTarget();
- BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
Tick miss_latency = curTick() - initial_tgt->recvTime;
PacketList writebacks;
delete pkt;
}
-
-
-
-template<class TagStore>
PacketPtr
-Cache<TagStore>::writebackBlk(BlkType *blk)
+Cache::writebackBlk(CacheBlk *blk)
{
assert(blk && blk->isValid() && blk->isDirty());
return writeback;
}
-template<class TagStore>
void
-Cache<TagStore>::memWriteback()
+Cache::memWriteback()
{
- WrappedBlkVisitor visitor(*this, &Cache<TagStore>::writebackVisitor);
+ CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
tags->forEachBlk(visitor);
}
-template<class TagStore>
void
-Cache<TagStore>::memInvalidate()
+Cache::memInvalidate()
{
- WrappedBlkVisitor visitor(*this, &Cache<TagStore>::invalidateVisitor);
+ CacheBlkVisitorWrapper visitor(*this, &Cache::invalidateVisitor);
tags->forEachBlk(visitor);
}
-template<class TagStore>
bool
-Cache<TagStore>::isDirty() const
+Cache::isDirty() const
{
- CacheBlkIsDirtyVisitor<BlkType> visitor;
+ CacheBlkIsDirtyVisitor visitor;
tags->forEachBlk(visitor);
return visitor.isDirty();
}
-template<class TagStore>
bool
-Cache<TagStore>::writebackVisitor(BlkType &blk)
+Cache::writebackVisitor(CacheBlk &blk)
{
if (blk.isDirty()) {
assert(blk.isValid());
return true;
}
-template<class TagStore>
bool
-Cache<TagStore>::invalidateVisitor(BlkType &blk)
+Cache::invalidateVisitor(CacheBlk &blk)
{
if (blk.isDirty())
if (blk.isValid()) {
assert(!blk.isDirty());
- tags->invalidate(dynamic_cast< BlkType *>(&blk));
+ tags->invalidate(&blk);
blk.invalidate();
}
return true;
}
-template<class TagStore>
-typename Cache<TagStore>::BlkType*
-Cache<TagStore>::allocateBlock(Addr addr, bool is_secure,
- PacketList &writebacks)
+CacheBlk*
+Cache::allocateBlock(Addr addr, bool is_secure, PacketList &writebacks)
{
- BlkType *blk = tags->findVictim(addr);
+ CacheBlk *blk = tags->findVictim(addr);
if (blk->isValid()) {
Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
// is called by both atomic and timing-mode accesses, and in atomic
// mode we don't mess with the write buffer (we just perform the
// writebacks atomically once the original request is complete).
-template<class TagStore>
-typename Cache<TagStore>::BlkType*
-Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
- PacketList &writebacks)
+CacheBlk*
+Cache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks)
{
assert(pkt->isResponse() || pkt->isWriteInvalidate());
Addr addr = pkt->getAddr();
//
/////////////////////////////////////////////////////
-template<class TagStore>
void
-Cache<TagStore>::
-doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
- bool already_copied, bool pending_inval)
+Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
+ bool already_copied, bool pending_inval)
{
// sanity check
assert(req_pkt->isRequest());
memSidePort->schedTimingSnoopResp(pkt, forward_time, true);
}
-template<class TagStore>
void
-Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
- bool is_timing, bool is_deferred,
- bool pending_inval)
+Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
+ bool is_deferred, bool pending_inval)
{
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
}
-template<class TagStore>
void
-Cache<TagStore>::recvTimingSnoopReq(PacketPtr pkt)
+Cache::recvTimingSnoopReq(PacketPtr pkt)
{
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
}
bool is_secure = pkt->isSecure();
- BlkType *blk = tags->findBlock(pkt->getAddr(), is_secure);
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), is_secure);
Addr blk_addr = blockAlign(pkt->getAddr());
MSHR *mshr = mshrQueue.findMatch(blk_addr, is_secure);
handleSnoop(pkt, blk, true, false, false);
}
-template<class TagStore>
bool
-Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
+Cache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
{
// Express snoop responses from master to slave, e.g., from L1 to L2
cache->recvTimingSnoopResp(pkt);
return true;
}
-template<class TagStore>
Tick
-Cache<TagStore>::recvAtomicSnoop(PacketPtr pkt)
+Cache::recvAtomicSnoop(PacketPtr pkt)
{
// Snoops shouldn't happen when bypassing caches
assert(!system->bypassCaches());
return 0;
}
- BlkType *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
+ CacheBlk *blk = tags->findBlock(pkt->getAddr(), pkt->isSecure());
handleSnoop(pkt, blk, false, false, false);
// We consider forwardLatency here because a snoop occurs in atomic mode
return forwardLatency * clockPeriod();
}
-template<class TagStore>
MSHR *
-Cache<TagStore>::getNextMSHR()
+Cache::getNextMSHR()
{
// Check both MSHR queue and write buffer for potential requests,
// note that null does not mean there is no request, it could
}
-template<class TagStore>
PacketPtr
-Cache<TagStore>::getTimingPacket()
+Cache::getTimingPacket()
{
MSHR *mshr = getNextMSHR();
assert(tags->findBlock(mshr->blkAddr, mshr->isSecure) == NULL);
pkt = tgt_pkt;
} else {
- BlkType *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
+ CacheBlk *blk = tags->findBlock(mshr->blkAddr, mshr->isSecure);
if (tgt_pkt->cmd == MemCmd::HardPFReq) {
// We need to check the caches above us to verify that
}
-template<class TagStore>
Tick
-Cache<TagStore>::nextMSHRReadyTime() const
+Cache::nextMSHRReadyTime() const
{
Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
writeBuffer.nextMSHRReadyTime());
return nextReady;
}
-template<class TagStore>
void
-Cache<TagStore>::serialize(std::ostream &os)
+Cache::serialize(std::ostream &os)
{
bool dirty(isDirty());
SERIALIZE_SCALAR(bad_checkpoint);
}
-template<class TagStore>
void
-Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
+Cache::unserialize(Checkpoint *cp, const std::string &section)
{
bool bad_checkpoint;
UNSERIALIZE_SCALAR(bad_checkpoint);
//
///////////////
-template<class TagStore>
AddrRangeList
-Cache<TagStore>::CpuSidePort::getAddrRanges() const
+Cache::CpuSidePort::getAddrRanges() const
{
return cache->getAddrRanges();
}
-template<class TagStore>
bool
-Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
+Cache::CpuSidePort::recvTimingReq(PacketPtr pkt)
{
assert(!cache->system->bypassCaches());
return success;
}
-template<class TagStore>
Tick
-Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
+Cache::CpuSidePort::recvAtomic(PacketPtr pkt)
{
return cache->recvAtomic(pkt);
}
-template<class TagStore>
void
-Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
+Cache::CpuSidePort::recvFunctional(PacketPtr pkt)
{
// functional request
cache->functionalAccess(pkt, true);
}
-template<class TagStore>
-Cache<TagStore>::
-CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
+Cache::
+CpuSidePort::CpuSidePort(const std::string &_name, Cache *_cache,
const std::string &_label)
: BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
{
//
///////////////
-template<class TagStore>
bool
-Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
+Cache::MemSidePort::recvTimingResp(PacketPtr pkt)
{
cache->recvTimingResp(pkt);
return true;
}
// Express snooping requests to memside port
-template<class TagStore>
void
-Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
+Cache::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
{
// handle snooping requests
cache->recvTimingSnoopReq(pkt);
}
-template<class TagStore>
Tick
-Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
+Cache::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
{
return cache->recvAtomicSnoop(pkt);
}
-template<class TagStore>
void
-Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
+Cache::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
{
// functional snoop (note that in contrast to atomic we don't have
// a specific functionalSnoop method, as they have the same
cache->functionalAccess(pkt, false);
}
-template<class TagStore>
void
-Cache<TagStore>::CacheReqPacketQueue::sendDeferredPacket()
+Cache::CacheReqPacketQueue::sendDeferredPacket()
{
// sanity check
assert(!waitingOnRetry);
}
}
-template<class TagStore>
-Cache<TagStore>::
-MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
+Cache::
+MemSidePort::MemSidePort(const std::string &_name, Cache *_cache,
const std::string &_label)
: BaseCache::CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
_reqQueue(*_cache, *this, _snoopRespQueue, _label),
#include "base/callback.hh"
#include "base/statistics.hh"
+#include "mem/cache/blk.hh"
#include "params/BaseTags.hh"
#include "sim/clocked_object.hh"
* Print all tags used
*/
virtual std::string print() const = 0;
+
+ /**
+ * Find a block using the memory address
+ */
+ virtual CacheBlk * findBlock(Addr addr, bool is_secure) const = 0;
+
+ /**
+ * Calculate the block offset of an address.
+ * @param addr the address to get the offset of.
+ * @return the block offset.
+ */
+ int extractBlkOffset(Addr addr) const
+ {
+ return (addr & (Addr)(blkSize-1));
+ }
+
+ virtual void invalidate(CacheBlk *blk) = 0;
+
+ virtual CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ int context_src) = 0;
+
+ virtual Addr extractTag(Addr addr) const = 0;
+
+ virtual void insertBlock(PacketPtr pkt, CacheBlk *blk) = 0;
+
+ virtual Addr regenerateBlkAddr(Addr tag, unsigned set) const = 0;
+
+ virtual CacheBlk* findVictim(Addr addr) = 0;
+
+ virtual int extractSet(Addr addr) const = 0;
+
+ virtual void forEachBlk(CacheBlkVisitor &visitor) = 0;
};
class BaseTagsCallback : public Callback
delete [] sets;
}
-BaseSetAssoc::BlkType*
+CacheBlk*
BaseSetAssoc::findBlock(Addr addr, bool is_secure) const
{
Addr tag = extractTag(addr);
* Invalidate the given block.
* @param blk The block to invalidate.
*/
- void invalidate(BlkType *blk)
+ void invalidate(CacheBlk *blk)
{
assert(blk);
assert(blk->isValid());
* @param lat The access latency.
* @return Pointer to the cache block if found.
*/
- BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src)
{
Addr tag = extractTag(addr);
* @param asid The address space ID.
* @return Pointer to the cache block if found.
*/
- BlkType* findBlock(Addr addr, bool is_secure) const;
+ CacheBlk* findBlock(Addr addr, bool is_secure) const;
/**
* Find an invalid block to evict for the address provided.
* @param addr The addr to a find a replacement candidate for.
* @return The candidate block.
*/
- BlkType* findVictim(Addr addr) const
+ CacheBlk* findVictim(Addr addr)
{
BlkType *blk = NULL;
int set = extractSet(addr);
* @param pkt Packet holding the address to update
* @param blk The block to update.
*/
- void insertBlock(PacketPtr pkt, BlkType *blk)
+ void insertBlock(PacketPtr pkt, CacheBlk *blk)
{
Addr addr = pkt->getAddr();
MasterID master_id = pkt->req->masterId();
return ((addr >> setShift) & setMask);
}
- /**
- * Get the block offset from an address.
- * @param addr The address to get the offset of.
- * @return The block offset.
- */
- int extractBlkOffset(Addr addr) const
- {
- return (addr & blkMask);
- }
-
/**
* Align an address to the block size.
* @param addr the address to align.
*
* \param visitor Visitor to call on each block.
*/
- template <typename V>
- void forEachBlk(V &visitor) {
+ void forEachBlk(CacheBlkVisitor &visitor) M5_ATTR_OVERRIDE {
for (unsigned i = 0; i < numSets * assoc; ++i) {
if (!visitor(blks[i]))
return;
}
void
-FALRU::invalidate(FALRU::BlkType *blk)
+FALRU::invalidate(CacheBlk *blk)
{
assert(blk);
tagsInUse--;
}
-FALRUBlk*
+CacheBlk*
+FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src)
+{
+ return accessBlock(addr, is_secure, lat, context_src, 0);
+}
+
+CacheBlk*
FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int context_src,
int *inCache)
{
}
-FALRUBlk*
+CacheBlk*
FALRU::findBlock(Addr addr, bool is_secure) const
{
Addr blkAddr = blkAlign(addr);
return blk;
}
-FALRUBlk*
+CacheBlk*
FALRU::findVictim(Addr addr)
{
FALRUBlk * blk = tail;
}
void
-FALRU::insertBlock(PacketPtr pkt, FALRU::BlkType *blk)
+FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
{
}
* Invalidate a cache block.
* @param blk The block to invalidate.
*/
- void invalidate(BlkType *blk);
+ void invalidate(CacheBlk *blk);
/**
* Access block and update replacement data. May not succeed, in which case
* @param inCache The FALRUBlk::inCache flags.
* @return Pointer to the cache block.
*/
- FALRUBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
- int context_src, int *inCache = 0);
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ int context_src, int *inCache);
+
+ /**
+ * Just a wrapper of the above function to conform to the base interface.
+ */
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ int context_src);
/**
* Find the block in the cache, do not update the replacement data.
* @param asid The address space ID.
* @return Pointer to the cache block.
*/
- FALRUBlk* findBlock(Addr addr, bool is_secure) const;
+ CacheBlk* findBlock(Addr addr, bool is_secure) const;
/**
* Find a replacement block for the address provided.
* @param pkt The request to a find a replacement candidate for.
* @return The block to place the replacement in.
*/
- FALRUBlk* findVictim(Addr addr);
+ CacheBlk* findVictim(Addr addr);
- void insertBlock(PacketPtr pkt, BlkType *blk);
+ void insertBlock(PacketPtr pkt, CacheBlk *blk);
/**
* Return the block size of this cache.
return 0;
}
- /**
- * Calculate the block offset of an address.
- * @param addr the address to get the offset of.
- * @return the block offset.
- */
- int extractBlkOffset(Addr addr) const
- {
- return (addr & (Addr)(blkSize-1));
- }
-
/**
* Regenerate the block address from the tag and the set.
* @param tag The tag of the block.
* @param set The set the block belongs to.
* @return the block address.
*/
- Addr regenerateBlkAddr(Addr tag, int set) const
+ Addr regenerateBlkAddr(Addr tag, unsigned set) const
{
return (tag);
}
*
* \param visitor Visitor to call on each block.
*/
- template <typename V>
- void forEachBlk(V &visitor) {
+ void forEachBlk(CacheBlkVisitor &visitor) M5_ATTR_OVERRIDE {
for (int i = 0; i < numBlocks; i++) {
if (!visitor(blks[i]))
return;
{
}
-BaseSetAssoc::BlkType*
+CacheBlk*
LRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
{
- BlkType *blk = BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
+ CacheBlk *blk = BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
if (blk != NULL) {
// move this block to head of the MRU list
return blk;
}
-BaseSetAssoc::BlkType*
-LRU::findVictim(Addr addr) const
+CacheBlk*
+LRU::findVictim(Addr addr)
{
int set = extractSet(addr);
// grab a replacement candidate
}
void
-LRU::invalidate(BlkType *blk)
+LRU::invalidate(CacheBlk *blk)
{
BaseSetAssoc::invalidate(blk);
*/
~LRU() {}
- BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src);
- BlkType* findVictim(Addr addr) const;
+ CacheBlk* findVictim(Addr addr);
void insertBlock(PacketPtr pkt, BlkType *blk);
- void invalidate(BlkType *blk);
+ void invalidate(CacheBlk *blk);
};
#endif // __MEM_CACHE_TAGS_LRU_HH__
{
}
-BaseSetAssoc::BlkType*
+CacheBlk*
RandomRepl::accessBlock(Addr addr, bool is_secure, Cycles &lat, int master_id)
{
return BaseSetAssoc::accessBlock(addr, is_secure, lat, master_id);
}
-BaseSetAssoc::BlkType*
-RandomRepl::findVictim(Addr addr) const
+CacheBlk*
+RandomRepl::findVictim(Addr addr)
{
- BlkType *blk = BaseSetAssoc::findVictim(addr);
+ CacheBlk *blk = BaseSetAssoc::findVictim(addr);
// if all blocks are valid, pick a replacement at random
if (blk->isValid()) {
}
void
-RandomRepl::invalidate(BlkType *blk)
+RandomRepl::invalidate(CacheBlk *blk)
{
BaseSetAssoc::invalidate(blk);
}
*/
~RandomRepl() {}
- BlkType* accessBlock(Addr addr, bool is_secure, Cycles &lat,
+ CacheBlk* accessBlock(Addr addr, bool is_secure, Cycles &lat,
int context_src);
- BlkType* findVictim(Addr addr) const;
+ CacheBlk* findVictim(Addr addr);
void insertBlock(PacketPtr pkt, BlkType *blk);
- void invalidate(BlkType *blk);
+ void invalidate(CacheBlk *blk);
};
#endif // __MEM_CACHE_TAGS_RANDOM_REPL_HH__