O3_ARM_v7a_DCache, O3_ARM_v7a_ICache, O3_ARM_v7aL2
else:
dcache_class, icache_class, l2_cache_class = \
- L1Cache, L1Cache, L2Cache
+ L1_DCache, L1_ICache, L2Cache
# Set the cache line size of the system
system.cache_line_size = options.cacheline_size
tgts_per_mshr = 20
is_top_level = True
+class L1_ICache(L1Cache):
+ is_read_only = True
+
+class L1_DCache(L1Cache):
+ pass
+
class L2Cache(BaseCache):
assoc = 8
hit_latency = 20
tgts_per_mshr = 12
forward_snoops = False
is_top_level = True
+ # The x86 table walker actually writes to the table-walker cache,
+ # so it cannot be marked read-only on that ISA
+ if buildEnv['TARGET_ISA'] == 'x86':
+ is_read_only = False
+ else:
+ is_read_only = True
assoc = 2
is_top_level = True
forward_snoops = False
+ is_read_only = True
# Data Cache
class O3_ARM_v7a_DCache(BaseCache):
write_buffers = 16
is_top_level = True
forward_snoops = False
+ is_read_only = True
# L2 Cache
class O3_ARM_v7aL2(BaseCache):
forward_snoops = Param.Bool(True,
"Forward snoops from mem side to cpu side")
is_top_level = Param.Bool(False, "Is this cache at the top level (e.g. L1)")
+ is_read_only = Param.Bool(False, "Is this cache read only (e.g. inst)")
prefetcher = Param.BasePrefetcher(NULL,"Prefetcher attached to cache")
prefetch_on_access = Param.Bool(False,
numTarget(p->tgts_per_mshr),
forwardSnoops(p->forward_snoops),
isTopLevel(p->is_top_level),
+ isReadOnly(p->is_read_only),
blocked(0),
order(0),
noTargetMSHR(NULL),
* side */
const bool isTopLevel;
+ /**
+ * Is this cache read only, for example the instruction cache, or
+ * table-walker cache. A cache that is read only should never see
+ * any writes, and should never get any dirty data (and hence
+ * never have to do any writebacks).
+ */
+ const bool isReadOnly;
+
/**
* Bit vector of the blocking reasons for the access path.
* @sa #BlockedCause
MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
{
+ // should only see clean evictions in a read-only cache
+ assert(!isReadOnly || pkt->cmd == MemCmd::CleanEvict);
assert(pkt->isWrite() && !pkt->isRead());
return allocateBufferInternal(&writeBuffer,
blockAlign(pkt->getAddr()), blkSize,
// sanity check
assert(pkt->isRequest());
+ chatty_assert(!(isReadOnly && pkt->isWrite()),
+ "Should never see a write in a read-only cache %s\n",
+ name());
+
DPRINTF(Cache, "%s for %s addr %#llx size %d\n", __func__,
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
+
if (pkt->req->isUncacheable()) {
DPRINTF(Cache, "%s%s addr %#llx uncacheable\n", pkt->cmdString(),
pkt->req->isInstFetch() ? " (ifetch)" : "",
PacketPtr
Cache::writebackBlk(CacheBlk *blk)
{
+ chatty_assert(!isReadOnly, "Writeback from read-only cache");
assert(blk && blk->isValid() && blk->isDirty());
writebacks[Request::wbMasterId]++;
blk->status |= BlkValid | BlkReadable;
if (!pkt->sharedAsserted()) {
+ // We could get non-shared responses from memory (rather than
+ // a cache) even in a read-only cache. Note that we still set
+ // this bit in a read-only cache, as it is used to represent
+ // the exclusive state.
blk->status |= BlkWritable;
+
// If we got this via cache-to-cache transfer (i.e., from a
// cache that was an owner) and took away that owner's copy,
// then we need to write it back. Normally this happens
// there are cases (such as failed store conditionals or
// compare-and-swaps) where we'll demand an exclusive copy but
// end up not writing it.
- if (pkt->memInhibitAsserted())
+ if (pkt->memInhibitAsserted()) {
blk->status |= BlkDirty;
+
+ chatty_assert(!isReadOnly, "Should never see dirty snoop response "
+ "in read-only cache %s\n", name());
+ }
}
DPRINTF(Cache, "Block addr %#llx (%s) moving from state %x to %s\n",
pkt->getAddr(), pkt->getSize(), blk->print());
}
+ chatty_assert(!(isReadOnly && blk->isDirty()),
+ "Should never have a dirty block in a read-only cache %s\n",
+ name());
+
// we may end up modifying both the block state and the packet (if
// we respond in atomic mode), so just figure out what to do now
// and then do it later. If we find dirty data while snooping for a
Arguments:
cpu -- CPU instance to work on.
"""
- cpu.addPrivateSplitL1Caches(L1Cache(size='32kB', assoc=1),
- L1Cache(size='32kB', assoc=4))
+ cpu.addPrivateSplitL1Caches(L1_ICache(size='32kB', assoc=1),
+ L1_DCache(size='32kB', assoc=4))
def create_caches_shared(self, system):
"""Add shared caches to a system.
# The atomic SE configurations do not use caches
if self.mem_mode == "timing":
# @todo We might want to revisit these rather enthusiastic L1 sizes
- cpu.addTwoLevelCacheHierarchy(L1Cache(size='128kB'),
- L1Cache(size='256kB'),
+ cpu.addTwoLevelCacheHierarchy(L1_ICache(size='128kB'),
+ L1_DCache(size='256kB'),
L2Cache(size='2MB'))
def create_caches_shared(self, system):
BaseFSSystem.__init__(self, **kwargs)
def create_caches_private(self, cpu):
- cpu.addTwoLevelCacheHierarchy(L1Cache(size='32kB', assoc=1),
- L1Cache(size='32kB', assoc=4),
+ cpu.addTwoLevelCacheHierarchy(L1_ICache(size='32kB', assoc=1),
+ L1_DCache(size='32kB', assoc=4),
L2Cache(size='4MB', assoc=8))
def create_caches_shared(self, system):
LinuxX86SystemBuilder.__init__(self)
def create_caches_private(self, cpu):
- cpu.addPrivateSplitL1Caches(L1Cache(size='32kB', assoc=1),
- L1Cache(size='32kB', assoc=4),
+ cpu.addPrivateSplitL1Caches(L1_ICache(size='32kB', assoc=1),
+ L1_DCache(size='32kB', assoc=4),
PageTableWalkerCache(),
PageTableWalkerCache())
LinuxX86SystemBuilder.__init__(self)
def create_caches_private(self, cpu):
- cpu.addTwoLevelCacheHierarchy(L1Cache(size='32kB', assoc=1),
- L1Cache(size='32kB', assoc=4),
+ cpu.addTwoLevelCacheHierarchy(L1_ICache(size='32kB', assoc=1),
+ L1_DCache(size='32kB', assoc=4),
L2Cache(size='4MB', assoc=8),
PageTableWalkerCache(),
PageTableWalkerCache())