/*
 * Copyright (c) 2013,2016 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Definitions of a fully associative LRU tag store.
 */
+#include "mem/cache/tags/fa_lru.hh"
+
#include <cassert>
#include <sstream>
#include "base/intmath.hh"
-#include "base/misc.hh"
-#include "mem/cache/tags/fa_lru.hh"
+#include "base/logging.hh"
using namespace std;
// NOTE(review): this chunk is unified-diff residue ('+'/'-' prefixed
// lines); code lines below are preserved byte-for-byte, only review
// comments are added.
//
// Constructor: validates the power-of-two geometry, sizes the
// multi-cache-size bookkeeping (numCaches, cacheMask, cacheBoundaries)
// and links the blks[] array into the LRU list (head = MRU, tail = LRU).
-FALRU::FALRU(unsigned _blkSize, unsigned _size, unsigned hit_latency)
- : blkSize(_blkSize), size(_size), hitLatency(hit_latency)
+FALRU::FALRU(const Params *p)
+ : BaseTags(p), cacheBoundaries(nullptr)
{
// Block alignment/masking requires power-of-two sizes.
if (!isPowerOf2(blkSize))
fatal("cache block size (in bytes) `%d' must be a power of two",
blkSize);
- if (!(hitLatency > 0))
- fatal("Access latency in cycles must be at least one cycle");
if (!isPowerOf2(size))
fatal("Cache Size must be power of 2 for now");
// Track hit/miss stats for every power-of-two size from 1<<17 bytes
// up to the configured size; one bit of cacheMask per tracked size.
numCaches = floorLog2(size) - 17;
if (numCaches >0){
cacheBoundaries = new FALRUBlk *[numCaches];
- cacheMask = (1 << numCaches) - 1;
+ cacheMask = (ULL(1) << numCaches) - 1;
} else {
cacheMask = 0;
}
- warmedUp = false;
// warmupBound: total number of blocks; used by findVictim() to decide
// when the cache counts as warmed up.
warmupBound = size/blkSize;
numBlocks = size/blkSize;
// Head starts resident in every tracked cache size, tail in none.
head = &(blks[0]);
tail = &(blks[numBlocks-1]);
- head->prev = NULL;
+ head->prev = nullptr;
head->next = &(blks[1]);
head->inCache = cacheMask;
tail->prev = &(blks[numBlocks-2]);
- tail->next = NULL;
+ tail->next = nullptr;
tail->inCache = 0;
unsigned index = (1 << 17) / blkSize;
// NOTE(review): diff context appears elided here -- the loop header
// (and the declarations of 'i' and 'j' used below) is missing;
// confirm against the full file.
blks[i].prev = &(blks[i-1]);
blks[i].next = &(blks[i+1]);
blks[i].isTouched = false;
+ blks[i].set = 0;
+ blks[i].way = i;
}
assert(j == numCaches);
assert(index == numBlocks);
//assert(check());
}
+FALRU::~FALRU()
+{
+ if (numCaches)
+ delete[] cacheBoundaries;
+
+ delete[] blks;
+}
+
// Register the FALRU-specific statistics (per-cache-size hit/miss
// vectors and the total access count) on top of BaseTags' stats.
// NOTE(review): diff residue -- the tail of this function (after the
// 'accesses' registration) appears elided in this chunk; confirm
// against the full file.
void
-FALRU::regStats(const string &name)
+FALRU::regStats()
{
using namespace Stats;
- BaseTags::regStats(name);
+ BaseTags::regStats();
// One bucket per tracked cache size, plus one for the full size.
hits
.init(numCaches+1)
- .name(name + ".falru_hits")
+ .name(name() + ".falru_hits")
.desc("The number of hits in each cache size.")
;
misses
.init(numCaches+1)
- .name(name + ".falru_misses")
+ .name(name() + ".falru_misses")
.desc("The number of misses in each cache size.")
;
accesses
- .name(name + ".falru_accesses")
+ .name(name() + ".falru_accesses")
.desc("The number of accesses to the FA LRU cache.")
;
// NOTE(review): function header elided in this chunk -- this is the
// tail of a tagHash lookup helper (presumably FALRU::hashLookup);
// confirm against the full file.
if (iter != tagHash.end()) {
return (*iter).second;
}
// Tag not present in the hash.
- return NULL;
+ return nullptr;
}
void
-FALRU::invalidateBlk(FALRU::BlkType *blk)
+FALRU::invalidate(CacheBlk *blk)
{
- if (blk) {
- blk->status = 0;
- blk->isTouched = false;
- tagsInUse--;
- }
+ assert(blk);
+ tagsInUse--;
}
-FALRUBlk*
-FALRU::accessBlock(Addr addr, int &lat, int context_src, int *inCache)
+CacheBlk*
+FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat)
+{
+ return accessBlock(addr, is_secure, lat, 0);
+}
+
// Access a block: bumps the access/miss statistics, computes the
// access latency, and (on a hit) moves the block toward the MRU end.
// NOTE(review): diff residue with elided context -- 'blkAddr' is used
// below but its declaration (presumably an aligned copy of 'addr') is
// missing, and the hit-counting loop body appears truncated; confirm
// against the full file.
+CacheBlk*
+FALRU::accessBlock(Addr addr, bool is_secure, Cycles &lat, int *inCache)
{
accesses++;
int tmp_in_cache = 0;
FALRUBlk* blk = hashLookup(blkAddr);
if (blk && blk->isValid()) {
+ // If a cache hit
+ lat = accessLatency;
+ // Check if the block to be accessed is available. If not,
+ // apply the accessLatency on top of block->whenReady.
+ if (blk->whenReady > curTick() &&
+ cache->ticksToCycles(blk->whenReady - curTick()) >
+ accessLatency) {
+ lat = cache->ticksToCycles(blk->whenReady - curTick()) +
+ accessLatency;
+ }
assert(blk->tag == blkAddr);
// Remember which tracked cache sizes the block was resident in.
tmp_in_cache = blk->inCache;
// NOTE(review): loop body looks elided -- per-size hit counting is
// missing, and moveToHead() would normally run once, not once per
// tracked size; confirm against the full file.
for (unsigned i = 0; i < numCaches; i++) {
moveToHead(blk);
}
} else {
- blk = NULL;
+ // If a cache miss
+ lat = lookupLatency;
+ blk = nullptr;
// A miss counts against every tracked cache size.
for (unsigned i = 0; i <= numCaches; ++i) {
misses[i]++;
}
// NOTE(review): inCache is dereferenced without a null check here;
// presumably guarded in elided context -- confirm (the 3-argument
// overload passes a null pointer).
*inCache = tmp_in_cache;
}
- lat = hitLatency;
//assert(check());
return blk;
}
-FALRUBlk*
-FALRU::findBlock(Addr addr) const
+CacheBlk*
+FALRU::findBlock(Addr addr, bool is_secure) const
{
Addr blkAddr = blkAlign(addr);
FALRUBlk* blk = hashLookup(blkAddr);
if (blk && blk->isValid()) {
assert(blk->tag == blkAddr);
} else {
- blk = NULL;
+ blk = nullptr;
}
return blk;
}
-FALRUBlk*
-FALRU::findVictim(Addr addr, PacketList &writebacks)
+CacheBlk*
+FALRU::findBlockBySetAndWay(int set, int way) const
+{
+ assert(set == 0);
+ return &blks[way];
+}
+
// Victim selection: in a fully-associative LRU cache the victim is
// always the tail (LRU) block.
// NOTE(review): diff residue -- the body appears truncated (there is
// an unmatched closing brace below and no visible 'return blk;');
// confirm against the full file.
+CacheBlk*
+FALRU::findVictim(Addr addr)
{
FALRUBlk * blk = tail;
// The LRU block cannot be resident in any tracked cache size.
assert(blk->inCache == 0);
blk->isTouched = true;
// Declare the cache warmed up once enough tags are in use.
if (!warmedUp && tagsInUse.value() >= warmupBound) {
warmedUp = true;
- warmupCycle = curTick;
+ warmupCycle = curTick();
}
}
//assert(check());
}
// Insert a block into the tag store.
// NOTE(review): diff residue -- insertBlock's body appears elided
// (empty braces), and the list-splicing code that follows looks like
// the body of a separate function (moveToHead) whose header is
// missing; confirm against the full file.
void
-FALRU::insertBlock(Addr addr, FALRU::BlkType *blk, int context_src)
+FALRU::insertBlock(PacketPtr pkt, CacheBlk *blk)
{
}
// Move 'blk' to the MRU (head) position: mark it resident in every
// tracked cache size, unlink it from its current spot, and relink it
// in front of the old head.
blk->inCache = cacheMask;
if (blk != head) {
if (blk == tail){
- assert(blk->next == NULL);
+ assert(blk->next == nullptr);
tail = blk->prev;
- tail->next = NULL;
+ tail->next = nullptr;
} else {
// Unlink from the middle of the list.
blk->prev->next = blk->next;
blk->next->prev = blk->prev;
}
blk->next = head;
- blk->prev = NULL;
+ blk->prev = nullptr;
head->prev = blk;
head = blk;
}
// Consistency check: walk the LRU list from head to tail verifying
// each block's inCache flags and the recorded cache-size boundary
// pointers.
// NOTE(review): diff residue -- the return-type line is missing and
// the loop tail (advancing 'blk', updating 'flags'/'j'/'boundary')
// appears elided; confirm against the full file.
FALRU::check()
{
FALRUBlk* blk = head;
- int size = 0;
+ int tot_size = 0;
// First tracked cache-size boundary, in bytes (1 << 17).
int boundary = 1<<17;
int j = 0;
int flags = cacheMask;
while (blk) {
- size += blkSize;
+ tot_size += blkSize;
// Blocks before a boundary must be flagged resident in all cache
// sizes whose boundary has not yet been crossed.
if (blk->inCache != flags) {
return false;
}
- if (size == boundary && blk != tail) {
+ if (tot_size == boundary && blk != tail) {
if (cacheBoundaries[j] != blk) {
return false;
}
}
return true;
}
+
+FALRU *
+FALRUParams::create()
+{
+ return new FALRU(this);
+}
+