From: Jason Lowe-Power
Date: Fri, 6 Oct 2017 21:54:02 +0000 (-0700)
Subject: learning_gem5: Adding code for SimpleCache
X-Git-Tag: v19.0.0.0~2480
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=43a1ea88b89aa3611d3b73cc0d4dc5de3f1f5e6b;p=gem5.git

learning_gem5: Adding code for SimpleCache

This is the rest of the code for part 2. See
http://learning.gem5.org/book/part2/simplecache.html

Change-Id: I5db099266a1196914656be3858fdd5fb4f8eab48
Signed-off-by: Jason Lowe-Power
Reviewed-on: https://gem5-review.googlesource.com/5023
Reviewed-by: Nikos Nikoleris
---

diff --git a/configs/learning_gem5/part2/simple_cache.py b/configs/learning_gem5/part2/simple_cache.py
new file mode 100644
index 000000000..a07147caf
--- /dev/null
+++ b/configs/learning_gem5/part2/simple_cache.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Jason Lowe-Power
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Jason Lowe-Power
+
+""" This file creates a barebones system and executes 'hello', a simple Hello
+World application. Adds a simple cache between the CPU and the membus.
+
+This config file assumes that the x86 ISA was built.
+"""
+
+# import the m5 (gem5) library created when gem5 is built
+import m5
+# import all of the SimObjects
+from m5.objects import *
+
+# create the system we are going to simulate
+system = System()
+
+# Set the clock frequency of the system (and all of its children)
+system.clk_domain = SrcClockDomain()
+system.clk_domain.clock = '1GHz'
+system.clk_domain.voltage_domain = VoltageDomain()
+
+# Set up the system
+system.mem_mode = 'timing' # Use timing accesses
+system.mem_ranges = [AddrRange('512MB')] # Create an address range
+
+# Create a simple CPU
+system.cpu = TimingSimpleCPU()
+
+# Create a memory bus, a coherent crossbar, in this case
+system.membus = SystemXBar()
+
+# Create a simple cache
+system.cache = SimpleCache(size='1kB')
+
+# Connect the I and D cache ports of the CPU to the cache.
+# Since cpu_side is a vector port, each time one of these is connected, it will
+# create a new instance of the CPUSidePort class
+system.cpu.icache_port = system.cache.cpu_side
+system.cpu.dcache_port = system.cache.cpu_side
+
+# Hook the cache up to the memory bus
+system.cache.mem_side = system.membus.slave
+
+# create the interrupt controller for the CPU and connect to the membus
+system.cpu.createInterruptController()
+system.cpu.interrupts[0].pio = system.membus.master
+system.cpu.interrupts[0].int_master = system.membus.slave
+system.cpu.interrupts[0].int_slave = system.membus.master
+
+# Create a DDR3 memory controller and connect it to the membus
+system.mem_ctrl = DDR3_1600_8x8()
+system.mem_ctrl.range = system.mem_ranges[0]
+system.mem_ctrl.port = system.membus.master
+
+# Connect the system up to the membus
+system.system_port = system.membus.slave
+
+# Create a process for a simple "Hello World" application
+process = Process()
+# Set the command
+# cmd is a list which begins with the executable (like argv)
+process.cmd = ['tests/test-progs/hello/bin/x86/linux/hello']
+# Set the cpu to use the process as its workload and create thread contexts
+system.cpu.workload = process
+system.cpu.createThreads()
+
+# set up the root SimObject and start the simulation
+root = Root(full_system = False, system = system)
+# instantiate all of the objects we've created above
+m5.instantiate()
+
+print "Beginning simulation!"
+exit_event = m5.simulate()
+print 'Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause())
diff --git a/src/learning_gem5/part2/SConscript b/src/learning_gem5/part2/SConscript
index f4605f39d..1a01d6de4 100644
--- a/src/learning_gem5/part2/SConscript
+++ b/src/learning_gem5/part2/SConscript
@@ -32,11 +32,14 @@ Import('*')
 SimObject('SimpleObject.py')
 SimObject('HelloObject.py')
 SimObject('SimpleMemobj.py')
+SimObject('SimpleCache.py')
 
 Source('simple_object.cc')
 Source('hello_object.cc')
 Source('goodbye_object.cc')
 Source('simple_memobj.cc')
+Source('simple_cache.cc')
 
 DebugFlag('HelloExample', "For Learning gem5 Part 2. Simple example debug flag")
 DebugFlag('SimpleMemobj', "For Learning gem5 Part 2.")
+DebugFlag('SimpleCache', "For Learning gem5 Part 2.")
diff --git a/src/learning_gem5/part2/SimpleCache.py b/src/learning_gem5/part2/SimpleCache.py
new file mode 100644
index 000000000..c0cdef9b8
--- /dev/null
+++ b/src/learning_gem5/part2/SimpleCache.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2017 Jason Lowe-Power
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Jason Lowe-Power
+
+from m5.params import *
+from m5.proxy import *
+from MemObject import MemObject
+
+class SimpleCache(MemObject):
+    type = 'SimpleCache'
+    cxx_header = "learning_gem5/part2/simple_cache.hh"
+
+    # Vector port example. Both the instruction and data ports connect to this
+    # port which is automatically split out into two ports.
+    cpu_side = VectorSlavePort("CPU side port, receives requests")
+    mem_side = MasterPort("Memory side port, sends requests")
+
+    latency = Param.Cycles(1, "Cycles taken on a hit or to resolve a miss")
+
+    size = Param.MemorySize('16kB', "The size of the cache")
+
+    system = Param.System(Parent.any, "The system this cache is part of")
diff --git a/src/learning_gem5/part2/simple_cache.cc b/src/learning_gem5/part2/simple_cache.cc
new file mode 100644
index 000000000..4db2cfa52
--- /dev/null
+++ b/src/learning_gem5/part2/simple_cache.cc
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2017 Jason Lowe-Power
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Jason Lowe-Power
+ */
+
+#include "learning_gem5/part2/simple_cache.hh"
+
+#include "base/random.hh"
+#include "debug/SimpleCache.hh"
+#include "sim/system.hh"
+
+SimpleCache::SimpleCache(SimpleCacheParams *params) :
+    MemObject(params),
+    latency(params->latency),
+    blockSize(params->system->cacheLineSize()),
+    capacity(params->size / blockSize),
+    memPort(params->name + ".mem_side", this),
+    blocked(false), originalPacket(nullptr), waitingPortId(-1)
+{
+    // Since the CPU side ports are a vector of ports, create an instance of
+    // the CPUSidePort for each connection. This member of params is
+    // automatically created depending on the name of the vector port and
+    // holds the number of connections to this port name
+    for (int i = 0; i < params->port_cpu_side_connection_count; ++i) {
+        cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i), i, this);
+    }
+}
+
+BaseMasterPort&
+SimpleCache::getMasterPort(const std::string& if_name, PortID idx)
+{
+    panic_if(idx != InvalidPortID, "This object doesn't support vector ports");
+
+    // This is the name from the Python SimObject declaration in SimpleCache.py
+    if (if_name == "mem_side") {
+        return memPort;
+    } else {
+        // pass it along to our super class
+        return MemObject::getMasterPort(if_name, idx);
+    }
+}
+
+BaseSlavePort&
+SimpleCache::getSlavePort(const std::string& if_name, PortID idx)
+{
+    // This is the name from the Python SimObject declaration (SimpleCache.py)
+    if (if_name == "cpu_side" && idx < cpuPorts.size()) {
+        // We should have already created all of the ports in the constructor
+        return cpuPorts[idx];
+    } else {
+        // pass it along to our super class
+        return MemObject::getSlavePort(if_name, idx);
+    }
+}
+
+void
+SimpleCache::CPUSidePort::sendPacket(PacketPtr pkt)
+{
+    // Note: This flow control is very simple since the cache is blocking.
+
+    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");
+
+    // If we can't send the packet across the port, store it for later.
+    DPRINTF(SimpleCache, "Sending %s to CPU\n", pkt->print());
+    if (!sendTimingResp(pkt)) {
+        DPRINTF(SimpleCache, "failed!\n");
+        blockedPacket = pkt;
+    }
+}
+
+AddrRangeList
+SimpleCache::CPUSidePort::getAddrRanges() const
+{
+    return owner->getAddrRanges();
+}
+
+void
+SimpleCache::CPUSidePort::trySendRetry()
+{
+    if (needRetry && blockedPacket == nullptr) {
+        // Only send a retry if the port is now completely free
+        needRetry = false;
+        DPRINTF(SimpleCache, "Sending retry req.\n");
+        sendRetryReq();
+    }
+}
+
+void
+SimpleCache::CPUSidePort::recvFunctional(PacketPtr pkt)
+{
+    // Just forward to the cache.
+    return owner->handleFunctional(pkt);
+}
+
+bool
+SimpleCache::CPUSidePort::recvTimingReq(PacketPtr pkt)
+{
+    DPRINTF(SimpleCache, "Got request %s\n", pkt->print());
+
+    if (blockedPacket || needRetry) {
+        // The cache may not be able to send a reply if this is blocked
+        DPRINTF(SimpleCache, "Request blocked\n");
+        needRetry = true;
+        return false;
+    }
+    // Just forward to the cache.
+    if (!owner->handleRequest(pkt, id)) {
+        DPRINTF(SimpleCache, "Request failed\n");
+        // stalling
+        needRetry = true;
+        return false;
+    } else {
+        DPRINTF(SimpleCache, "Request succeeded\n");
+        return true;
+    }
+}
+
+void
+SimpleCache::CPUSidePort::recvRespRetry()
+{
+    // We should have a blocked packet if this function is called.
+    assert(blockedPacket != nullptr);
+
+    // Grab the blocked packet.
+    PacketPtr pkt = blockedPacket;
+    blockedPacket = nullptr;
+
+    DPRINTF(SimpleCache, "Retrying response pkt %s\n", pkt->print());
+    // Try to resend it. It's possible that it fails again.
+    sendPacket(pkt);
+
+    // We may now be able to accept new packets
+    trySendRetry();
+}
+
+void
+SimpleCache::MemSidePort::sendPacket(PacketPtr pkt)
+{
+    // Note: This flow control is very simple since the cache is blocking.
+
+    panic_if(blockedPacket != nullptr, "Should never try to send if blocked!");
+
+    // If we can't send the packet across the port, store it for later.
+    if (!sendTimingReq(pkt)) {
+        blockedPacket = pkt;
+    }
+}
+
+bool
+SimpleCache::MemSidePort::recvTimingResp(PacketPtr pkt)
+{
+    // Just forward to the cache.
+    return owner->handleResponse(pkt);
+}
+
+void
+SimpleCache::MemSidePort::recvReqRetry()
+{
+    // We should have a blocked packet if this function is called.
+    assert(blockedPacket != nullptr);
+
+    // Grab the blocked packet.
+    PacketPtr pkt = blockedPacket;
+    blockedPacket = nullptr;
+
+    // Try to resend it. It's possible that it fails again.
+    sendPacket(pkt);
+}
+
+void
+SimpleCache::MemSidePort::recvRangeChange()
+{
+    owner->sendRangeChange();
+}
+
+bool
+SimpleCache::handleRequest(PacketPtr pkt, int port_id)
+{
+    if (blocked) {
+        // There is currently an outstanding request so we can't respond. Stall
+        return false;
+    }
+
+    DPRINTF(SimpleCache, "Got request for addr %#x\n", pkt->getAddr());
+
+    // This cache is now blocked waiting for the response to this packet.
+    blocked = true;
+
+    // Store the port for when we get the response
+    assert(waitingPortId == -1);
+    waitingPortId = port_id;
+
+    // Schedule an event after cache access latency to actually access
+    schedule(new EventFunctionWrapper([this, pkt]{ accessTiming(pkt); },
+                                      name() + ".accessEvent", true),
+             clockEdge(latency));
+
+    return true;
+}
+
+bool
+SimpleCache::handleResponse(PacketPtr pkt)
+{
+    assert(blocked);
+    DPRINTF(SimpleCache, "Got response for addr %#x\n", pkt->getAddr());
+
+    // For now assume that inserts are off of the critical path and don't count
+    // for any added latency.
+    insert(pkt);
+
+    missLatency.sample(curTick() - missTime);
+
+    // If we had to upgrade the request packet to a full cache line, now we
+    // can use that packet to construct the response.
+    if (originalPacket != nullptr) {
+        DPRINTF(SimpleCache, "Copying data from new packet to old\n");
+        // We had to upgrade a previous packet. We can functionally deal with
+        // the cache access now. It better be a hit.
+        bool hit M5_VAR_USED = accessFunctional(originalPacket);
+        panic_if(!hit, "Should always hit after inserting");
+        originalPacket->makeResponse();
+        delete pkt; // We may need to delay this, I'm not sure.
+        pkt = originalPacket;
+        originalPacket = nullptr;
+    } // else, pkt contains the data it needs
+
+    sendResponse(pkt);
+
+    return true;
+}
+
+void SimpleCache::sendResponse(PacketPtr pkt)
+{
+    assert(blocked);
+    DPRINTF(SimpleCache, "Sending resp for addr %#x\n", pkt->getAddr());
+
+    int port = waitingPortId;
+
+    // The packet is now done. We're about to put it in the port, no need for
+    // this object to continue to stall.
+    // We need to free the resource before sending the packet in case the CPU
+    // tries to send another request immediately (e.g., in the same callchain).
+    blocked = false;
+    waitingPortId = -1;
+
+    // Simply forward to the memory port
+    cpuPorts[port].sendPacket(pkt);
+
+    // For each of the cpu ports, if it needs to send a retry, it should do it
+    // now since this memory object may be unblocked now.
+    for (auto& port : cpuPorts) {
+        port.trySendRetry();
+    }
+}
+
+void
+SimpleCache::handleFunctional(PacketPtr pkt)
+{
+    if (accessFunctional(pkt)) {
+        pkt->makeResponse();
+    } else {
+        memPort.sendFunctional(pkt);
+    }
+}
+
+void
+SimpleCache::accessTiming(PacketPtr pkt)
+{
+    bool hit = accessFunctional(pkt);
+
+    DPRINTF(SimpleCache, "%s for packet: %s\n", hit ? "Hit" : "Miss",
+            pkt->print());
+
+    if (hit) {
+        // Respond to the CPU side
+        hits++; // update stats
+        DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), pkt->getSize());
+        pkt->makeResponse();
+        sendResponse(pkt);
+    } else {
+        misses++; // update stats
+        missTime = curTick();
+        // Forward to the memory side.
+        // We can't directly forward the packet unless it is exactly the size
+        // of the cache line, and aligned. Check for that here.
+        Addr addr = pkt->getAddr();
+        Addr block_addr = pkt->getBlockAddr(blockSize);
+        unsigned size = pkt->getSize();
+        if (addr == block_addr && size == blockSize) {
+            // Aligned and block size. We can just forward.
+            DPRINTF(SimpleCache, "forwarding packet\n");
+            memPort.sendPacket(pkt);
+        } else {
+            DPRINTF(SimpleCache, "Upgrading packet to block size\n");
+            panic_if(addr - block_addr + size > blockSize,
+                     "Cannot handle accesses that span multiple cache lines");
+            // Unaligned access to one cache block
+            assert(pkt->needsResponse());
+            MemCmd cmd;
+            if (pkt->isWrite() || pkt->isRead()) {
+                // Read the data from memory to write into the block.
+                // We'll write the data in the cache (i.e., a writeback cache)
+                cmd = MemCmd::ReadReq;
+            } else {
+                panic("Unknown packet type in upgrade size");
+            }
+
+            // Create a new packet that is blockSize
+            PacketPtr new_pkt = new Packet(pkt->req, cmd, blockSize);
+            new_pkt->allocate();
+
+            // Should now be block aligned
+            assert(new_pkt->getAddr() == new_pkt->getBlockAddr(blockSize));
+
+            // Save the old packet
+            originalPacket = pkt;
+
+            DPRINTF(SimpleCache, "forwarding packet\n");
+            memPort.sendPacket(new_pkt);
+        }
+    }
+}
+
+bool
+SimpleCache::accessFunctional(PacketPtr pkt)
+{
+    Addr block_addr = pkt->getBlockAddr(blockSize);
+    auto it = cacheStore.find(block_addr);
+    if (it != cacheStore.end()) {
+        if (pkt->isWrite()) {
+            // Write the data into the block in the cache
+            pkt->writeDataToBlock(it->second, blockSize);
+        } else if (pkt->isRead()) {
+            // Read the data out of the cache block into the packet
+            pkt->setDataFromBlock(it->second, blockSize);
+        } else {
+            panic("Unknown packet type!");
+        }
+        return true;
+    }
+    return false;
+}
+
+void
+SimpleCache::insert(PacketPtr pkt)
+{
+    // The packet should be aligned.
+    assert(pkt->getAddr() == pkt->getBlockAddr(blockSize));
+    // The address should not be in the cache
+    assert(cacheStore.find(pkt->getAddr()) == cacheStore.end());
+    // The pkt should be a response
+    assert(pkt->isResponse());
+
+    if (cacheStore.size() >= capacity) {
+        // Select random thing to evict. This is a little convoluted since we
+        // are using a std::unordered_map. See http://bit.ly/2hrnLP2
+        int bucket, bucket_size;
+        do {
+            bucket = random_mt.random(0, (int)cacheStore.bucket_count() - 1);
+        } while ( (bucket_size = cacheStore.bucket_size(bucket)) == 0 );
+        auto block = std::next(cacheStore.begin(bucket),
+                               random_mt.random(0, bucket_size - 1));
+
+        DPRINTF(SimpleCache, "Removing addr %#x\n", block->first);
+
+        // Write back the data.
+        // Create a new request-packet pair
+        RequestPtr req = new Request(block->first, blockSize, 0, 0);
+        PacketPtr new_pkt = new Packet(req, MemCmd::WritebackDirty, blockSize);
+        new_pkt->dataDynamic(block->second); // This will be deleted later
+
+        DPRINTF(SimpleCache, "Writing packet back %s\n", pkt->print());
+        // Send the write to memory
+        memPort.sendPacket(new_pkt);
+
+        // Delete this entry
+        cacheStore.erase(block->first);
+    }
+
+    DPRINTF(SimpleCache, "Inserting %s\n", pkt->print());
+    DDUMP(SimpleCache, pkt->getConstPtr<uint8_t>(), blockSize);
+
+    // Allocate space for the cache block data
+    uint8_t *data = new uint8_t[blockSize];
+
+    // Insert the data and address into the cache store
+    cacheStore[pkt->getAddr()] = data;
+
+    // Write the data into the cache
+    pkt->writeDataToBlock(data, blockSize);
+}
+
+AddrRangeList
+SimpleCache::getAddrRanges() const
+{
+    DPRINTF(SimpleCache, "Sending new ranges\n");
+    // Just use the same ranges as whatever is on the memory side.
+    return memPort.getAddrRanges();
+}
+
+void
+SimpleCache::sendRangeChange() const
+{
+    for (auto& port : cpuPorts) {
+        port.sendRangeChange();
+    }
+}
+
+void
+SimpleCache::regStats()
+{
+    // If you don't do this you get errors about uninitialized stats.
+    MemObject::regStats();
+
+    hits.name(name() + ".hits")
+        .desc("Number of hits")
+        ;
+
+    misses.name(name() + ".misses")
+        .desc("Number of misses")
+        ;
+
+    missLatency.name(name() + ".missLatency")
+        .desc("Ticks for misses to the cache")
+        .init(16) // number of buckets
+        ;
+
+    hitRatio.name(name() + ".hitRatio")
+        .desc("The ratio of hits to the total accesses to the cache")
+        ;
+
+    hitRatio = hits / (hits + misses);
+
+}
+
+
+SimpleCache*
+SimpleCacheParams::create()
+{
+    return new SimpleCache(this);
+}
diff --git a/src/learning_gem5/part2/simple_cache.hh b/src/learning_gem5/part2/simple_cache.hh
new file mode 100644
index 000000000..7d53ffed0
--- /dev/null
+++ b/src/learning_gem5/part2/simple_cache.hh
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2017 Jason Lowe-Power
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Jason Lowe-Power
+ */
+
+#ifndef __LEARNING_GEM5_SIMPLE_CACHE_SIMPLE_CACHE_HH__
+#define __LEARNING_GEM5_SIMPLE_CACHE_SIMPLE_CACHE_HH__
+
+#include <unordered_map>
+
+#include "mem/mem_object.hh"
+#include "params/SimpleCache.hh"
+
+/**
+ * A very simple cache object. Has a fully-associative data store with random
+ * replacement.
+ * This cache is fully blocking (not non-blocking). Only a single request can
+ * be outstanding at a time.
+ * This cache is a writeback cache.
+ */
+class SimpleCache : public MemObject
+{
+  private:
+
+    /**
+     * Port on the CPU-side that receives requests.
+     * Mostly just forwards requests to the cache (owner)
+     */
+    class CPUSidePort : public SlavePort
+    {
+      private:
+        /// Since this is a vector port, need to know what number this one is
+        int id;
+
+        /// The object that owns this object (SimpleCache)
+        SimpleCache *owner;
+
+        /// True if the port needs to send a retry req.
+        bool needRetry;
+
+        /// If we tried to send a packet and it was blocked, store it here
+        PacketPtr blockedPacket;
+
+      public:
+        /**
+         * Constructor. Just calls the superclass constructor.
+         */
+        CPUSidePort(const std::string& name, int id, SimpleCache *owner) :
+            SlavePort(name, owner), id(id), owner(owner), needRetry(false),
+            blockedPacket(nullptr)
+        { }
+
+        /**
+         * Send a packet across this port. This is called by the owner and
+         * all of the flow control is handled in this function.
+         * This is a convenience function for the SimpleCache to send pkts.
+         *
+         * @param packet to send.
+         */
+        void sendPacket(PacketPtr pkt);
+
+        /**
+         * Get a list of the non-overlapping address ranges the owner is
+         * responsible for. All slave ports must override this function
+         * and return a populated list with at least one item.
+         *
+         * @return a list of ranges responded to
+         */
+        AddrRangeList getAddrRanges() const override;
+
+        /**
+         * Send a retry to the peer port only if it is needed. This is called
+         * from the SimpleCache whenever it is unblocked.
+         */
+        void trySendRetry();
+
+      protected:
+        /**
+         * Receive an atomic request packet from the master port.
+         * No need to implement in this simple cache.
+         */
+        Tick recvAtomic(PacketPtr pkt) override
+        { panic("recvAtomic unimpl."); }
+
+        /**
+         * Receive a functional request packet from the master port.
+         * Performs a "debug" access updating/reading the data in place.
+         *
+         * @param packet the requestor sent.
+         */
+        void recvFunctional(PacketPtr pkt) override;
+
+        /**
+         * Receive a timing request from the master port.
+         *
+         * @param the packet that the requestor sent
+         * @return whether this object can consume the packet. If false, we
+         *         will call sendRetry() when we can try to receive this
+         *         request again.
+         */
+        bool recvTimingReq(PacketPtr pkt) override;
+
+        /**
+         * Called by the master port if sendTimingResp was called on this
+         * slave port (causing recvTimingResp to be called on the master
+         * port) and was unsuccessful.
+         */
+        void recvRespRetry() override;
+    };
+
+    /**
+     * Port on the memory-side that receives responses.
+     * Mostly just forwards requests to the cache (owner)
+     */
+    class MemSidePort : public MasterPort
+    {
+      private:
+        /// The object that owns this object (SimpleCache)
+        SimpleCache *owner;
+
+        /// If we tried to send a packet and it was blocked, store it here
+        PacketPtr blockedPacket;
+
+      public:
+        /**
+         * Constructor. Just calls the superclass constructor.
+         */
+        MemSidePort(const std::string& name, SimpleCache *owner) :
+            MasterPort(name, owner), owner(owner), blockedPacket(nullptr)
+        { }
+
+        /**
+         * Send a packet across this port. This is called by the owner and
+         * all of the flow control is handled in this function.
+         * This is a convenience function for the SimpleCache to send pkts.
+         *
+         * @param packet to send.
+         */
+        void sendPacket(PacketPtr pkt);
+
+      protected:
+        /**
+         * Receive a timing response from the slave port.
+         */
+        bool recvTimingResp(PacketPtr pkt) override;
+
+        /**
+         * Called by the slave port if sendTimingReq was called on this
+         * master port (causing recvTimingReq to be called on the slave
+         * port) and was unsuccessful.
+         */
+        void recvReqRetry() override;
+
+        /**
+         * Called to receive an address range change from the peer slave
+         * port. The default implementation ignores the change and does
+         * nothing. Override this function in a derived class if the owner
+         * needs to be aware of the address ranges, e.g. in an
+         * interconnect component like a bus.
+         */
+        void recvRangeChange() override;
+    };
+
+    /**
+     * Handle the request from the CPU side. Called from the CPU port
+     * on a timing request.
+     *
+     * @param requesting packet
+     * @param id of the port to send the response
+     * @return true if we can handle the request this cycle, false if the
+     *         requestor needs to retry later
+     */
+    bool handleRequest(PacketPtr pkt, int port_id);
+
+    /**
+     * Handle the response from the memory side. Called from the memory port
+     * on a timing response.
+     *
+     * @param responding packet
+     * @return true if we can handle the response this cycle, false if the
+     *         responder needs to retry later
+     */
+    bool handleResponse(PacketPtr pkt);
+
+    /**
+     * Send the packet to the CPU side.
+     * This function assumes the pkt is already a response packet and forwards
+     * it to the correct port. This function also unblocks this object and
+     * cleans up the whole request.
+     *
+     * @param the packet to send to the cpu side
+     */
+    void sendResponse(PacketPtr pkt);
+
+    /**
+     * Handle a packet functionally. Update the data on a write and get the
+     * data on a read. Called from CPU port on a recv functional.
+     *
+     * @param packet to functionally handle
+     */
+    void handleFunctional(PacketPtr pkt);
+
+    /**
+     * Access the cache for a timing access. This is called after the cache
+     * access latency has already elapsed.
+     */
+    void accessTiming(PacketPtr pkt);
+
+    /**
+     * This is where we actually update / read from the cache. This function
+     * is executed on both timing and functional accesses.
+     *
+     * @return true if a hit, false otherwise
+     */
+    bool accessFunctional(PacketPtr pkt);
+
+    /**
+     * Insert a block into the cache. If there is no room left in the cache,
+     * then this function evicts a random entry to make room for the new block.
+     *
+     * @param packet with the data (and address) to insert into the cache
+     */
+    void insert(PacketPtr pkt);
+
+    /**
+     * Return the address ranges this cache is responsible for. Just use the
+     * same as the next upper level of the hierarchy.
+     *
+     * @return the address ranges this cache is responsible for
+     */
+    AddrRangeList getAddrRanges() const;
+
+    /**
+     * Tell the CPU side to ask for our memory ranges.
+     */
+    void sendRangeChange() const;
+
+    /// Latency to check the cache. Number of cycles for both hit and miss
+    const Cycles latency;
+
+    /// The block size for the cache
+    const unsigned blockSize;
+
+    /// Number of blocks in the cache (size of cache / block size)
+    const unsigned capacity;
+
+    /// Instantiation of the CPU-side port
+    std::vector<CPUSidePort> cpuPorts;
+
+    /// Instantiation of the memory-side port
+    MemSidePort memPort;
+
+    /// True if this cache is currently blocked waiting for a response.
+    bool blocked;
+
+    /// Packet that we are currently handling. Used for upgrading to larger
+    /// cache line sizes
+    PacketPtr originalPacket;
+
+    /// The port to send the response when we receive it back
+    int waitingPortId;
+
+    /// For tracking the miss latency
+    Tick missTime;
+
+    /// An incredibly simple cache storage. Maps block addresses to data
+    std::unordered_map<Addr, uint8_t*> cacheStore;
+
+    /// Cache statistics
+    Stats::Scalar hits;
+    Stats::Scalar misses;
+    Stats::Histogram missLatency;
+    Stats::Formula hitRatio;
+
+  public:
+
+    /** constructor
+     */
+    SimpleCache(SimpleCacheParams *params);
+
+    /**
+     * Get a master port with a given name and index. This is used at
+     * binding time and returns a reference to a protocol-agnostic
+     * base master port.
+     *
+     * @param if_name Port name
+     * @param idx Index in the case of a VectorPort
+     *
+     * @return A reference to the given port
+     */
+    virtual BaseMasterPort& getMasterPort(const std::string& if_name,
+                                          PortID idx = InvalidPortID) override;
+
+    /**
+     * Get a slave port with a given name and index. This is used at
+     * binding time and returns a reference to a protocol-agnostic
+     * base slave port.
+     *
+     * @param if_name Port name
+     * @param idx Index in the case of a VectorPort
+     *
+     * @return A reference to the given port
+     */
+    virtual BaseSlavePort& getSlavePort(const std::string& if_name,
+                                        PortID idx = InvalidPortID) override;
+
+    /**
+     * Register the stats
+     */
+    void regStats() override;
+};
+
+
+#endif // __LEARNING_GEM5_SIMPLE_CACHE_SIMPLE_CACHE_HH__
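
Usage note (not part of the patch): the size and latency values declared in
SimpleCache.py can be overridden where the object is instantiated in the
config script above. A minimal sketch of a variant instantiation, with
illustrative values only, assuming the same SimpleCache SimObject:

    # Variant of the SimpleCache instantiation in simple_cache.py above.
    # 'size' is a Param.MemorySize and 'latency' a Param.Cycles declared in
    # SimpleCache.py; the numbers here are arbitrary examples, not defaults
    # taken from this patch.
    system.cache = SimpleCache(size='16kB', latency=10)

The resulting script is run the same way as the one in the patch, e.g. with
an X86 build: build/X86/gem5.opt configs/learning_gem5/part2/simple_cache.py
(the exact binary path depends on the local build).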