SimObject('BaseCache.py')
-Source('base_cache.cc')
+Source('base.cc')
Source('cache.cc')
-Source('cache_blk.cc')
-Source('cache_builder.cc')
+Source('blk.cc')
+Source('builder.cc')
+Source('mshr.cc')
+Source('mshr_queue.cc')
TraceFlag('Cache')
TraceFlag('CachePort')
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Definition of BaseCache functions.
+ */
+
+#include "cpu/base.hh"
+#include "cpu/smt.hh"
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/miss/mshr.hh"
+
+using namespace std;
+
+BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
+ const std::string &_label,
+ std::vector<Range<Addr> > filter_ranges)
+ : SimpleTimingPort(_name, _cache), cache(_cache),
+ label(_label), otherPort(NULL),
+ blocked(false), mustSendRetry(false), filterRanges(filter_ranges)
+{
+}
+
+
+BaseCache::BaseCache(const Params *p)
+ : MemObject(p),
+ mshrQueue("MSHRs", p->mshrs, 4, MSHRQueue_MSHRs),
+ writeBuffer("write buffer", p->write_buffers, p->mshrs+1000,
+ MSHRQueue_WriteBuffer),
+ blkSize(p->block_size),
+ hitLatency(p->latency),
+ numTarget(p->tgts_per_mshr),
+ blocked(0),
+ noTargetMSHR(NULL),
+ missCount(p->max_miss_count),
+ drainEvent(NULL)
+{
+}
+
+void
+BaseCache::CachePort::recvStatusChange(Port::Status status)
+{
+ if (status == Port::RangeChange) {
+ otherPort->sendStatusChange(Port::RangeChange);
+ }
+}
+
+
+bool
+BaseCache::CachePort::checkFunctional(PacketPtr pkt)
+{
+ pkt->pushLabel(label);
+ bool done = SimpleTimingPort::checkFunctional(pkt);
+ pkt->popLabel();
+ return done;
+}
+
+
+int
+BaseCache::CachePort::deviceBlockSize()
+{
+ return cache->getBlockSize();
+}
+
+
+bool
+BaseCache::CachePort::recvRetryCommon()
+{
+ assert(waitingOnRetry);
+ waitingOnRetry = false;
+ return false;
+}
+
+
+void
+BaseCache::CachePort::setBlocked()
+{
+ assert(!blocked);
+ DPRINTF(Cache, "Cache Blocking\n");
+ blocked = true;
+ //Clear the retry flag
+ mustSendRetry = false;
+}
+
+void
+BaseCache::CachePort::clearBlocked()
+{
+ assert(blocked);
+ DPRINTF(Cache, "Cache Unblocking\n");
+ blocked = false;
+ if (mustSendRetry)
+ {
+ DPRINTF(Cache, "Cache Sending Retry\n");
+ mustSendRetry = false;
+ SendRetryEvent *ev = new SendRetryEvent(this, true);
+ // @TODO: need to find a better time (next bus cycle?)
+ ev->schedule(curTick + 1);
+ }
+}
+
+
+void
+BaseCache::init()
+{
+ if (!cpuSidePort || !memSidePort)
+ panic("Cache not hooked up on both sides\n");
+ cpuSidePort->sendStatusChange(Port::RangeChange);
+}
+
+
+void
+BaseCache::regStats()
+{
+ using namespace Stats;
+
+ // Hit statistics
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ hits[access_idx]
+ .init(maxThreadsPerCPU)
+ .name(name() + "." + cstr + "_hits")
+ .desc("number of " + cstr + " hits")
+ .flags(total | nozero | nonan)
+ ;
+ }
+
+// These macros make it easier to sum the right subset of commands and
+// to change the subset of commands that are considered "demand" vs
+// "non-demand"
+#define SUM_DEMAND(s) \
+ (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::ReadExReq])
+
+// should writebacks be included here? prior code was inconsistent...
+#define SUM_NON_DEMAND(s) \
+ (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq])
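+
+// For example, SUM_DEMAND(hits) expands to
+// (hits[MemCmd::ReadReq] + hits[MemCmd::WriteReq] + hits[MemCmd::ReadExReq]),
+// which is the expression assigned to demandHits below.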
+
+ demandHits
+ .name(name() + ".demand_hits")
+ .desc("number of demand (read+write) hits")
+ .flags(total)
+ ;
+ demandHits = SUM_DEMAND(hits);
+
+ overallHits
+ .name(name() + ".overall_hits")
+ .desc("number of overall hits")
+ .flags(total)
+ ;
+ overallHits = demandHits + SUM_NON_DEMAND(hits);
+
+ // Miss statistics
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ misses[access_idx]
+ .init(maxThreadsPerCPU)
+ .name(name() + "." + cstr + "_misses")
+ .desc("number of " + cstr + " misses")
+ .flags(total | nozero | nonan)
+ ;
+ }
+
+ demandMisses
+ .name(name() + ".demand_misses")
+ .desc("number of demand (read+write) misses")
+ .flags(total)
+ ;
+ demandMisses = SUM_DEMAND(misses);
+
+ overallMisses
+ .name(name() + ".overall_misses")
+ .desc("number of overall misses")
+ .flags(total)
+ ;
+ overallMisses = demandMisses + SUM_NON_DEMAND(misses);
+
+ // Miss latency statistics
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ missLatency[access_idx]
+ .init(maxThreadsPerCPU)
+ .name(name() + "." + cstr + "_miss_latency")
+ .desc("number of " + cstr + " miss cycles")
+ .flags(total | nozero | nonan)
+ ;
+ }
+
+ demandMissLatency
+ .name(name() + ".demand_miss_latency")
+ .desc("number of demand (read+write) miss cycles")
+ .flags(total)
+ ;
+ demandMissLatency = SUM_DEMAND(missLatency);
+
+ overallMissLatency
+ .name(name() + ".overall_miss_latency")
+ .desc("number of overall miss cycles")
+ .flags(total)
+ ;
+ overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
+
+ // access formulas
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ accesses[access_idx]
+ .name(name() + "." + cstr + "_accesses")
+ .desc("number of " + cstr + " accesses(hits+misses)")
+ .flags(total | nozero | nonan)
+ ;
+
+ accesses[access_idx] = hits[access_idx] + misses[access_idx];
+ }
+
+ demandAccesses
+ .name(name() + ".demand_accesses")
+ .desc("number of demand (read+write) accesses")
+ .flags(total)
+ ;
+ demandAccesses = demandHits + demandMisses;
+
+ overallAccesses
+ .name(name() + ".overall_accesses")
+ .desc("number of overall (read+write) accesses")
+ .flags(total)
+ ;
+ overallAccesses = overallHits + overallMisses;
+
+ // miss rate formulas
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ missRate[access_idx]
+ .name(name() + "." + cstr + "_miss_rate")
+ .desc("miss rate for " + cstr + " accesses")
+ .flags(total | nozero | nonan)
+ ;
+
+ missRate[access_idx] = misses[access_idx] / accesses[access_idx];
+ }
+
+ demandMissRate
+ .name(name() + ".demand_miss_rate")
+ .desc("miss rate for demand accesses")
+ .flags(total)
+ ;
+ demandMissRate = demandMisses / demandAccesses;
+
+ overallMissRate
+ .name(name() + ".overall_miss_rate")
+ .desc("miss rate for overall accesses")
+ .flags(total)
+ ;
+ overallMissRate = overallMisses / overallAccesses;
+
+ // miss latency formulas
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ avgMissLatency[access_idx]
+ .name(name() + "." + cstr + "_avg_miss_latency")
+ .desc("average " + cstr + " miss latency")
+ .flags(total | nozero | nonan)
+ ;
+
+ avgMissLatency[access_idx] =
+ missLatency[access_idx] / misses[access_idx];
+ }
+
+ demandAvgMissLatency
+ .name(name() + ".demand_avg_miss_latency")
+ .desc("average overall miss latency")
+ .flags(total)
+ ;
+ demandAvgMissLatency = demandMissLatency / demandMisses;
+
+ overallAvgMissLatency
+ .name(name() + ".overall_avg_miss_latency")
+ .desc("average overall miss latency")
+ .flags(total)
+ ;
+ overallAvgMissLatency = overallMissLatency / overallMisses;
+
+ blocked_cycles.init(NUM_BLOCKED_CAUSES);
+ blocked_cycles
+ .name(name() + ".blocked_cycles")
+ .desc("number of cycles access was blocked")
+ .subname(Blocked_NoMSHRs, "no_mshrs")
+ .subname(Blocked_NoTargets, "no_targets")
+ ;
+
+
+ blocked_causes.init(NUM_BLOCKED_CAUSES);
+ blocked_causes
+ .name(name() + ".blocked")
+ .desc("number of cycles access was blocked")
+ .subname(Blocked_NoMSHRs, "no_mshrs")
+ .subname(Blocked_NoTargets, "no_targets")
+ ;
+
+ avg_blocked
+ .name(name() + ".avg_blocked_cycles")
+ .desc("average number of cycles each access was blocked")
+ .subname(Blocked_NoMSHRs, "no_mshrs")
+ .subname(Blocked_NoTargets, "no_targets")
+ ;
+
+ avg_blocked = blocked_cycles / blocked_causes;
+
+ fastWrites
+ .name(name() + ".fast_writes")
+ .desc("number of fast writes performed")
+ ;
+
+ cacheCopies
+ .name(name() + ".cache_copies")
+ .desc("number of cache copies performed")
+ ;
+
+ writebacks
+ .init(maxThreadsPerCPU)
+ .name(name() + ".writebacks")
+ .desc("number of writebacks")
+ .flags(total)
+ ;
+
+ // MSHR statistics
+ // MSHR hit statistics
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ mshr_hits[access_idx]
+ .init(maxThreadsPerCPU)
+ .name(name() + "." + cstr + "_mshr_hits")
+ .desc("number of " + cstr + " MSHR hits")
+ .flags(total | nozero | nonan)
+ ;
+ }
+
+ demandMshrHits
+ .name(name() + ".demand_mshr_hits")
+ .desc("number of demand (read+write) MSHR hits")
+ .flags(total)
+ ;
+ demandMshrHits = SUM_DEMAND(mshr_hits);
+
+ overallMshrHits
+ .name(name() + ".overall_mshr_hits")
+ .desc("number of overall MSHR hits")
+ .flags(total)
+ ;
+ overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
+
+ // MSHR miss statistics
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ mshr_misses[access_idx]
+ .init(maxThreadsPerCPU)
+ .name(name() + "." + cstr + "_mshr_misses")
+ .desc("number of " + cstr + " MSHR misses")
+ .flags(total | nozero | nonan)
+ ;
+ }
+
+ demandMshrMisses
+ .name(name() + ".demand_mshr_misses")
+ .desc("number of demand (read+write) MSHR misses")
+ .flags(total)
+ ;
+ demandMshrMisses = SUM_DEMAND(mshr_misses);
+
+ overallMshrMisses
+ .name(name() + ".overall_mshr_misses")
+ .desc("number of overall MSHR misses")
+ .flags(total)
+ ;
+ overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
+
+ // MSHR miss latency statistics
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ mshr_miss_latency[access_idx]
+ .init(maxThreadsPerCPU)
+ .name(name() + "." + cstr + "_mshr_miss_latency")
+ .desc("number of " + cstr + " MSHR miss cycles")
+ .flags(total | nozero | nonan)
+ ;
+ }
+
+ demandMshrMissLatency
+ .name(name() + ".demand_mshr_miss_latency")
+ .desc("number of demand (read+write) MSHR miss cycles")
+ .flags(total)
+ ;
+ demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
+
+ overallMshrMissLatency
+ .name(name() + ".overall_mshr_miss_latency")
+ .desc("number of overall MSHR miss cycles")
+ .flags(total)
+ ;
+ overallMshrMissLatency =
+ demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
+
+ // MSHR uncacheable statistics
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ mshr_uncacheable[access_idx]
+ .init(maxThreadsPerCPU)
+ .name(name() + "." + cstr + "_mshr_uncacheable")
+ .desc("number of " + cstr + " MSHR uncacheable")
+ .flags(total | nozero | nonan)
+ ;
+ }
+
+ overallMshrUncacheable
+ .name(name() + ".overall_mshr_uncacheable_misses")
+ .desc("number of overall MSHR uncacheable misses")
+ .flags(total)
+ ;
+ overallMshrUncacheable =
+ SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
+
+ // MSHR uncacheable latency statistics
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ mshr_uncacheable_lat[access_idx]
+ .init(maxThreadsPerCPU)
+ .name(name() + "." + cstr + "_mshr_uncacheable_latency")
+ .desc("number of " + cstr + " MSHR uncacheable cycles")
+ .flags(total | nozero | nonan)
+ ;
+ }
+
+ overallMshrUncacheableLatency
+ .name(name() + ".overall_mshr_uncacheable_latency")
+ .desc("number of overall MSHR uncacheable cycles")
+ .flags(total)
+ ;
+ overallMshrUncacheableLatency =
+ SUM_DEMAND(mshr_uncacheable_lat) +
+ SUM_NON_DEMAND(mshr_uncacheable_lat);
+
+#if 0
+ // MSHR access formulas
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ mshrAccesses[access_idx]
+ .name(name() + "." + cstr + "_mshr_accesses")
+ .desc("number of " + cstr + " mshr accesses(hits+misses)")
+ .flags(total | nozero | nonan)
+ ;
+ mshrAccesses[access_idx] =
+ mshr_hits[access_idx] + mshr_misses[access_idx]
+ + mshr_uncacheable[access_idx];
+ }
+
+ demandMshrAccesses
+ .name(name() + ".demand_mshr_accesses")
+ .desc("number of demand (read+write) mshr accesses")
+ .flags(total | nozero | nonan)
+ ;
+ demandMshrAccesses = demandMshrHits + demandMshrMisses;
+
+ overallMshrAccesses
+ .name(name() + ".overall_mshr_accesses")
+ .desc("number of overall (read+write) mshr accesses")
+ .flags(total | nozero | nonan)
+ ;
+ overallMshrAccesses = overallMshrHits + overallMshrMisses
+ + overallMshrUncacheable;
+#endif
+
+ // MSHR miss rate formulas
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ mshrMissRate[access_idx]
+ .name(name() + "." + cstr + "_mshr_miss_rate")
+ .desc("mshr miss rate for " + cstr + " accesses")
+ .flags(total | nozero | nonan)
+ ;
+
+ mshrMissRate[access_idx] =
+ mshr_misses[access_idx] / accesses[access_idx];
+ }
+
+ demandMshrMissRate
+ .name(name() + ".demand_mshr_miss_rate")
+ .desc("mshr miss rate for demand accesses")
+ .flags(total)
+ ;
+ demandMshrMissRate = demandMshrMisses / demandAccesses;
+
+ overallMshrMissRate
+ .name(name() + ".overall_mshr_miss_rate")
+ .desc("mshr miss rate for overall accesses")
+ .flags(total)
+ ;
+ overallMshrMissRate = overallMshrMisses / overallAccesses;
+
+ // mshrMiss latency formulas
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ avgMshrMissLatency[access_idx]
+ .name(name() + "." + cstr + "_avg_mshr_miss_latency")
+ .desc("average " + cstr + " mshr miss latency")
+ .flags(total | nozero | nonan)
+ ;
+
+ avgMshrMissLatency[access_idx] =
+ mshr_miss_latency[access_idx] / mshr_misses[access_idx];
+ }
+
+ demandAvgMshrMissLatency
+ .name(name() + ".demand_avg_mshr_miss_latency")
+ .desc("average overall mshr miss latency")
+ .flags(total)
+ ;
+ demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
+
+ overallAvgMshrMissLatency
+ .name(name() + ".overall_avg_mshr_miss_latency")
+ .desc("average overall mshr miss latency")
+ .flags(total)
+ ;
+ overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
+
+ // mshrUncacheable latency formulas
+ for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
+ MemCmd cmd(access_idx);
+ const string &cstr = cmd.toString();
+
+ avgMshrUncacheableLatency[access_idx]
+ .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
+ .desc("average " + cstr + " mshr uncacheable latency")
+ .flags(total | nozero | nonan)
+ ;
+
+ avgMshrUncacheableLatency[access_idx] =
+ mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
+ }
+
+ overallAvgMshrUncacheableLatency
+ .name(name() + ".overall_avg_mshr_uncacheable_latency")
+ .desc("average overall mshr uncacheable latency")
+ .flags(total)
+ ;
+ overallAvgMshrUncacheableLatency =
+ overallMshrUncacheableLatency / overallMshrUncacheable;
+
+ mshr_cap_events
+ .init(maxThreadsPerCPU)
+ .name(name() + ".mshr_cap_events")
+ .desc("number of times MSHR cap was activated")
+ .flags(total)
+ ;
+
+ //software prefetching stats
+ soft_prefetch_mshr_full
+ .init(maxThreadsPerCPU)
+ .name(name() + ".soft_prefetch_mshr_full")
+ .desc("number of mshr full events for SW prefetching instrutions")
+ .flags(total)
+ ;
+
+ mshr_no_allocate_misses
+ .name(name() +".no_allocate_misses")
+ .desc("Number of misses that were no-allocate")
+ ;
+
+}
+
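+/**
+ * Drain both the memory-side and CPU-side ports. If either port reports
+ * outstanding work (a non-zero count), remember the drain event and move
+ * into the Draining state; otherwise transition directly to Drained.
+ */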
+unsigned int
+BaseCache::drain(Event *de)
+{
+ int count = memSidePort->drain(de) + cpuSidePort->drain(de);
+
+ // Set status
+ if (count != 0) {
+ drainEvent = de;
+
+ changeState(SimObject::Draining);
+ return count;
+ }
+
+ changeState(SimObject::Drained);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ * Steve Reinhardt
+ * Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Declares a basic cache interface BaseCache.
+ */
+
+#ifndef __BASE_CACHE_HH__
+#define __BASE_CACHE_HH__
+
+#include <vector>
+#include <string>
+#include <list>
+#include <algorithm>
+#include <inttypes.h>
+
+#include "base/misc.hh"
+#include "base/statistics.hh"
+#include "base/trace.hh"
+#include "mem/cache/miss/mshr_queue.hh"
+#include "mem/mem_object.hh"
+#include "mem/packet.hh"
+#include "mem/tport.hh"
+#include "mem/request.hh"
+#include "params/BaseCache.hh"
+#include "sim/eventq.hh"
+#include "sim/sim_exit.hh"
+
+class MSHR;
+/**
+ * A basic cache interface. Implements some common functions for speed.
+ */
+class BaseCache : public MemObject
+{
+ /**
+ * Indexes to enumerate the MSHR queues.
+ */
+ enum MSHRQueueIndex {
+ MSHRQueue_MSHRs,
+ MSHRQueue_WriteBuffer
+ };
+
+ /**
+ * Reasons for caches to be blocked.
+ */
+ enum BlockedCause {
+ Blocked_NoMSHRs = MSHRQueue_MSHRs,
+ Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
+ Blocked_NoTargets,
+ NUM_BLOCKED_CAUSES
+ };
+
+ public:
+ /**
+ * Reasons for cache to request a bus.
+ */
+ enum RequestCause {
+ Request_MSHR = MSHRQueue_MSHRs,
+ Request_WB = MSHRQueue_WriteBuffer,
+ Request_PF,
+ NUM_REQUEST_CAUSES
+ };
+
+ private:
+
+ class CachePort : public SimpleTimingPort
+ {
+ public:
+ BaseCache *cache;
+
+ protected:
+ CachePort(const std::string &_name, BaseCache *_cache,
+ const std::string &_label,
+ std::vector<Range<Addr> > filter_ranges);
+
+ virtual void recvStatusChange(Status status);
+
+ virtual int deviceBlockSize();
+
+ bool recvRetryCommon();
+
+ typedef EventWrapper<Port, &Port::sendRetry>
+ SendRetryEvent;
+
+ const std::string label;
+
+ public:
+ void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }
+
+ void setBlocked();
+
+ void clearBlocked();
+
+ bool checkFunctional(PacketPtr pkt);
+
+ CachePort *otherPort;
+
+ bool blocked;
+
+ bool mustSendRetry;
+
+ /** filter ranges */
+ std::vector<Range<Addr> > filterRanges;
+
+ void requestBus(RequestCause cause, Tick time)
+ {
+ DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
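+ // Don't schedule a new send while we are still waiting on a retry
+ // from the peer.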
+ if (!waitingOnRetry) {
+ schedSendEvent(time);
+ }
+ }
+
+ void respond(PacketPtr pkt, Tick time) {
+ schedSendTiming(pkt, time);
+ }
+ };
+
+ public: //Made public so coherence can get at it.
+ CachePort *cpuSidePort;
+ CachePort *memSidePort;
+
+ protected:
+
+ /** Miss status registers */
+ MSHRQueue mshrQueue;
+
+ /** Write/writeback buffer */
+ MSHRQueue writeBuffer;
+
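+ /**
+ * Allocate an entry for the given packet in the given MSHR queue,
+ * block the cache if the allocation makes the queue full, and
+ * optionally schedule a memory-side bus request at the given time.
+ */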
+ MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
+ PacketPtr pkt, Tick time, bool requestBus)
+ {
+ MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);
+
+ if (mq->isFull()) {
+ setBlocked((BlockedCause)mq->index);
+ }
+
+ if (requestBus) {
+ requestMemSideBus((RequestCause)mq->index, time);
+ }
+
+ return mshr;
+ }
+
+ void markInServiceInternal(MSHR *mshr)
+ {
+ MSHRQueue *mq = mshr->queue;
+ bool wasFull = mq->isFull();
+ mq->markInService(mshr);
+ if (wasFull && !mq->isFull()) {
+ clearBlocked((BlockedCause)mq->index);
+ }
+ }
+
+ /** Block size of this cache */
+ const int blkSize;
+
+ /**
+ * The latency of a hit in this device.
+ */
+ int hitLatency;
+
+ /** The number of targets for each MSHR. */
+ const int numTarget;
+
+ /** Increasing order number assigned to each incoming request. */
+ uint64_t order;
+
+ /**
+ * Bit vector of the blocking reasons for the access path.
+ * @sa #BlockedCause
+ */
+ uint8_t blocked;
+
+ /** Stores time the cache blocked for statistics. */
+ Tick blockedCycle;
+
+ /** Pointer to the MSHR that has no targets. */
+ MSHR *noTargetMSHR;
+
+ /** The number of misses to trigger an exit event. */
+ Counter missCount;
+
+ /** The drain event. */
+ Event *drainEvent;
+
+ public:
+ // Statistics
+ /**
+ * @addtogroup CacheStatistics
+ * @{
+ */
+
+ /** Number of hits per thread for each type of command. @sa Packet::Command */
+ Stats::Vector<> hits[MemCmd::NUM_MEM_CMDS];
+ /** Number of hits for demand accesses. */
+ Stats::Formula demandHits;
+ /** Number of hits for all accesses. */
+ Stats::Formula overallHits;
+
+ /** Number of misses per thread for each type of command. @sa Packet::Command */
+ Stats::Vector<> misses[MemCmd::NUM_MEM_CMDS];
+ /** Number of misses for demand accesses. */
+ Stats::Formula demandMisses;
+ /** Number of misses for all accesses. */
+ Stats::Formula overallMisses;
+
+ /**
+ * Total number of cycles per thread/command spent waiting for a miss.
+ * Used to calculate the average miss latency.
+ */
+ Stats::Vector<> missLatency[MemCmd::NUM_MEM_CMDS];
+ /** Total number of cycles spent waiting for demand misses. */
+ Stats::Formula demandMissLatency;
+ /** Total number of cycles spent waiting for all misses. */
+ Stats::Formula overallMissLatency;
+
+ /** The number of accesses per command and thread. */
+ Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
+ /** The number of demand accesses. */
+ Stats::Formula demandAccesses;
+ /** The number of overall accesses. */
+ Stats::Formula overallAccesses;
+
+ /** The miss rate per command and thread. */
+ Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
+ /** The miss rate of all demand accesses. */
+ Stats::Formula demandMissRate;
+ /** The miss rate for all accesses. */
+ Stats::Formula overallMissRate;
+
+ /** The average miss latency per command and thread. */
+ Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
+ /** The average miss latency for demand misses. */
+ Stats::Formula demandAvgMissLatency;
+ /** The average miss latency for all misses. */
+ Stats::Formula overallAvgMissLatency;
+
+ /** The total number of cycles blocked for each blocked cause. */
+ Stats::Vector<> blocked_cycles;
+ /** The number of times this cache blocked for each blocked cause. */
+ Stats::Vector<> blocked_causes;
+
+ /** The average number of cycles blocked for each blocked cause. */
+ Stats::Formula avg_blocked;
+
+ /** The number of fast writes (WH64) performed. */
+ Stats::Scalar<> fastWrites;
+
+ /** The number of cache copies performed. */
+ Stats::Scalar<> cacheCopies;
+
+ /** Number of blocks written back per thread. */
+ Stats::Vector<> writebacks;
+
+ /** Number of misses that hit in the MSHRs per command and thread. */
+ Stats::Vector<> mshr_hits[MemCmd::NUM_MEM_CMDS];
+ /** Demand misses that hit in the MSHRs. */
+ Stats::Formula demandMshrHits;
+ /** Total number of misses that hit in the MSHRs. */
+ Stats::Formula overallMshrHits;
+
+ /** Number of misses that miss in the MSHRs, per command and thread. */
+ Stats::Vector<> mshr_misses[MemCmd::NUM_MEM_CMDS];
+ /** Demand misses that miss in the MSHRs. */
+ Stats::Formula demandMshrMisses;
+ /** Total number of misses that miss in the MSHRs. */
+ Stats::Formula overallMshrMisses;
+
+ /** Number of MSHR uncacheable misses, per command and thread. */
+ Stats::Vector<> mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
+ /** Total number of MSHR uncacheable misses. */
+ Stats::Formula overallMshrUncacheable;
+
+ /** Total cycle latency of each MSHR miss, per command and thread. */
+ Stats::Vector<> mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
+ /** Total cycle latency of demand MSHR misses. */
+ Stats::Formula demandMshrMissLatency;
+ /** Total cycle latency of overall MSHR misses. */
+ Stats::Formula overallMshrMissLatency;
+
+ /** Total cycle latency of MSHR uncacheable misses, per command and thread. */
+ Stats::Vector<> mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
+ /** Total cycle latency of overall MSHR uncacheable misses. */
+ Stats::Formula overallMshrUncacheableLatency;
+
+ /** The total number of MSHR accesses per command and thread. */
+ Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
+ /** The total number of demand MSHR accesses. */
+ Stats::Formula demandMshrAccesses;
+ /** The total number of MSHR accesses. */
+ Stats::Formula overallMshrAccesses;
+
+ /** The miss rate in the MSHRs per command and thread. */
+ Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
+ /** The demand miss rate in the MSHRs. */
+ Stats::Formula demandMshrMissRate;
+ /** The overall miss rate in the MSHRs. */
+ Stats::Formula overallMshrMissRate;
+
+ /** The average latency of an MSHR miss, per command and thread. */
+ Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
+ /** The average latency of a demand MSHR miss. */
+ Stats::Formula demandAvgMshrMissLatency;
+ /** The average overall latency of an MSHR miss. */
+ Stats::Formula overallAvgMshrMissLatency;
+
+ /** The average MSHR uncacheable latency, per command and thread. */
+ Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
+ /** The average overall MSHR uncacheable latency. */
+ Stats::Formula overallAvgMshrUncacheableLatency;
+
+ /** The number of times a thread hit its MSHR cap. */
+ Stats::Vector<> mshr_cap_events;
+ /** The number of times software prefetches caused the MSHR to block. */
+ Stats::Vector<> soft_prefetch_mshr_full;
+
+ Stats::Scalar<> mshr_no_allocate_misses;
+
+ /**
+ * @}
+ */
+
+ /**
+ * Register stats for this object.
+ */
+ virtual void regStats();
+
+ public:
+ typedef BaseCacheParams Params;
+ BaseCache(const Params *p);
+ ~BaseCache() {}
+
+ virtual void init();
+
+ /**
+ * Query block size of a cache.
+ * @return The block size
+ */
+ int getBlockSize() const
+ {
+ return blkSize;
+ }
+
+
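+ /**
+ * Align an address to the start of its cache block. This assumes the
+ * block size is a power of two, which the mask below relies on.
+ */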
+ Addr blockAlign(Addr addr) const { return (addr & ~(blkSize - 1)); }
+
+
+ MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
+ {
+ assert(!pkt->req->isUncacheable());
+ return allocateBufferInternal(&mshrQueue,
+ blockAlign(pkt->getAddr()), blkSize,
+ pkt, time, requestBus);
+ }
+
+ MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
+ {
+ assert(pkt->isWrite() && !pkt->isRead());
+ return allocateBufferInternal(&writeBuffer,
+ pkt->getAddr(), pkt->getSize(),
+ pkt, time, requestBus);
+ }
+
+ MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
+ {
+ assert(pkt->req->isUncacheable());
+ assert(pkt->isRead());
+ return allocateBufferInternal(&mshrQueue,
+ pkt->getAddr(), pkt->getSize(),
+ pkt, time, requestBus);
+ }
+
+ /**
+ * Returns true if the cache is blocked for accesses.
+ */
+ bool isBlocked()
+ {
+ return blocked != 0;
+ }
+
+ /**
+ * Marks the access path of the cache as blocked for the given cause. This
+ * also sets the blocked flag in the slave interface.
+ * @param cause The reason for the cache blocking.
+ */
+ void setBlocked(BlockedCause cause)
+ {
+ uint8_t flag = 1 << cause;
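+ // Each cause gets its own bit in the vector; the cache stays blocked
+ // until every set bit has been cleared again via clearBlocked().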
+ if (blocked == 0) {
+ blocked_causes[cause]++;
+ blockedCycle = curTick;
+ cpuSidePort->setBlocked();
+ }
+ blocked |= flag;
+ DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
+ }
+
+ /**
+ * Marks the cache as unblocked for the given cause. This also clears the
+ * blocked flags in the appropriate interfaces.
+ * @param cause The newly unblocked cause.
+ * @warning Calling this function can cause a blocked request on the bus to
+ * access the cache. The cache must be in a state to handle that request.
+ */
+ void clearBlocked(BlockedCause cause)
+ {
+ uint8_t flag = 1 << cause;
+ blocked &= ~flag;
+ DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
+ if (blocked == 0) {
+ blocked_cycles[cause] += curTick - blockedCycle;
+ cpuSidePort->clearBlocked();
+ }
+ }
+
+ Tick nextMSHRReadyTime()
+ {
+ return std::min(mshrQueue.nextMSHRReadyTime(),
+ writeBuffer.nextMSHRReadyTime());
+ }
+
+ /**
+ * Request the master bus for the given cause and time.
+ * @param cause The reason for the request.
+ * @param time The time to make the request.
+ */
+ void requestMemSideBus(RequestCause cause, Tick time)
+ {
+ memSidePort->requestBus(cause, time);
+ }
+
+ /**
+ * Clear the master bus request for the given cause.
+ * @param cause The request reason to clear.
+ */
+ void deassertMemSideBusRequest(RequestCause cause)
+ {
+ // obsolete!!
+ assert(false);
+ // memSidePort->deassertBusRequest(cause);
+ // checkDrain();
+ }
+
+ virtual unsigned int drain(Event *de);
+
+ virtual bool inCache(Addr addr) = 0;
+
+ virtual bool inMissQueue(Addr addr) = 0;
+
+ void incMissCount(PacketPtr pkt)
+ {
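+ // The thread index is hard-wired to 0 here; the per-thread lookup is
+ // left commented out below.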
+ misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
+
+ if (missCount) {
+ --missCount;
+ if (missCount == 0)
+ exitSimLoop("A cache reached the maximum miss count");
+ }
+ }
+
+};
+
+#endif //__BASE_CACHE_HH__
+++ /dev/null
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- */
-
-/**
- * @file
- * Definition of BaseCache functions.
- */
-
-#include "cpu/base.hh"
-#include "cpu/smt.hh"
-#include "mem/cache/base_cache.hh"
-#include "mem/cache/miss/mshr.hh"
-
-using namespace std;
-
-BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
- const std::string &_label,
- std::vector<Range<Addr> > filter_ranges)
- : SimpleTimingPort(_name, _cache), cache(_cache),
- label(_label), otherPort(NULL),
- blocked(false), mustSendRetry(false), filterRanges(filter_ranges)
-{
-}
-
-
-BaseCache::BaseCache(const Params *p)
- : MemObject(p),
- mshrQueue("MSHRs", p->mshrs, 4, MSHRQueue_MSHRs),
- writeBuffer("write buffer", p->write_buffers, p->mshrs+1000,
- MSHRQueue_WriteBuffer),
- blkSize(p->block_size),
- hitLatency(p->latency),
- numTarget(p->tgts_per_mshr),
- blocked(0),
- noTargetMSHR(NULL),
- missCount(p->max_miss_count),
- drainEvent(NULL)
-{
-}
-
-void
-BaseCache::CachePort::recvStatusChange(Port::Status status)
-{
- if (status == Port::RangeChange) {
- otherPort->sendStatusChange(Port::RangeChange);
- }
-}
-
-
-bool
-BaseCache::CachePort::checkFunctional(PacketPtr pkt)
-{
- pkt->pushLabel(label);
- bool done = SimpleTimingPort::checkFunctional(pkt);
- pkt->popLabel();
- return done;
-}
-
-
-int
-BaseCache::CachePort::deviceBlockSize()
-{
- return cache->getBlockSize();
-}
-
-
-bool
-BaseCache::CachePort::recvRetryCommon()
-{
- assert(waitingOnRetry);
- waitingOnRetry = false;
- return false;
-}
-
-
-void
-BaseCache::CachePort::setBlocked()
-{
- assert(!blocked);
- DPRINTF(Cache, "Cache Blocking\n");
- blocked = true;
- //Clear the retry flag
- mustSendRetry = false;
-}
-
-void
-BaseCache::CachePort::clearBlocked()
-{
- assert(blocked);
- DPRINTF(Cache, "Cache Unblocking\n");
- blocked = false;
- if (mustSendRetry)
- {
- DPRINTF(Cache, "Cache Sending Retry\n");
- mustSendRetry = false;
- SendRetryEvent *ev = new SendRetryEvent(this, true);
- // @TODO: need to find a better time (next bus cycle?)
- ev->schedule(curTick + 1);
- }
-}
-
-
-void
-BaseCache::init()
-{
- if (!cpuSidePort || !memSidePort)
- panic("Cache not hooked up on both sides\n");
- cpuSidePort->sendStatusChange(Port::RangeChange);
-}
-
-
-void
-BaseCache::regStats()
-{
- using namespace Stats;
-
- // Hit statistics
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- hits[access_idx]
- .init(maxThreadsPerCPU)
- .name(name() + "." + cstr + "_hits")
- .desc("number of " + cstr + " hits")
- .flags(total | nozero | nonan)
- ;
- }
-
-// These macros make it easier to sum the right subset of commands and
-// to change the subset of commands that are considered "demand" vs
-// "non-demand"
-#define SUM_DEMAND(s) \
- (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::ReadExReq])
-
-// should writebacks be included here? prior code was inconsistent...
-#define SUM_NON_DEMAND(s) \
- (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq])
-
- demandHits
- .name(name() + ".demand_hits")
- .desc("number of demand (read+write) hits")
- .flags(total)
- ;
- demandHits = SUM_DEMAND(hits);
-
- overallHits
- .name(name() + ".overall_hits")
- .desc("number of overall hits")
- .flags(total)
- ;
- overallHits = demandHits + SUM_NON_DEMAND(hits);
-
- // Miss statistics
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- misses[access_idx]
- .init(maxThreadsPerCPU)
- .name(name() + "." + cstr + "_misses")
- .desc("number of " + cstr + " misses")
- .flags(total | nozero | nonan)
- ;
- }
-
- demandMisses
- .name(name() + ".demand_misses")
- .desc("number of demand (read+write) misses")
- .flags(total)
- ;
- demandMisses = SUM_DEMAND(misses);
-
- overallMisses
- .name(name() + ".overall_misses")
- .desc("number of overall misses")
- .flags(total)
- ;
- overallMisses = demandMisses + SUM_NON_DEMAND(misses);
-
- // Miss latency statistics
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- missLatency[access_idx]
- .init(maxThreadsPerCPU)
- .name(name() + "." + cstr + "_miss_latency")
- .desc("number of " + cstr + " miss cycles")
- .flags(total | nozero | nonan)
- ;
- }
-
- demandMissLatency
- .name(name() + ".demand_miss_latency")
- .desc("number of demand (read+write) miss cycles")
- .flags(total)
- ;
- demandMissLatency = SUM_DEMAND(missLatency);
-
- overallMissLatency
- .name(name() + ".overall_miss_latency")
- .desc("number of overall miss cycles")
- .flags(total)
- ;
- overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
-
- // access formulas
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- accesses[access_idx]
- .name(name() + "." + cstr + "_accesses")
- .desc("number of " + cstr + " accesses(hits+misses)")
- .flags(total | nozero | nonan)
- ;
-
- accesses[access_idx] = hits[access_idx] + misses[access_idx];
- }
-
- demandAccesses
- .name(name() + ".demand_accesses")
- .desc("number of demand (read+write) accesses")
- .flags(total)
- ;
- demandAccesses = demandHits + demandMisses;
-
- overallAccesses
- .name(name() + ".overall_accesses")
- .desc("number of overall (read+write) accesses")
- .flags(total)
- ;
- overallAccesses = overallHits + overallMisses;
-
- // miss rate formulas
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- missRate[access_idx]
- .name(name() + "." + cstr + "_miss_rate")
- .desc("miss rate for " + cstr + " accesses")
- .flags(total | nozero | nonan)
- ;
-
- missRate[access_idx] = misses[access_idx] / accesses[access_idx];
- }
-
- demandMissRate
- .name(name() + ".demand_miss_rate")
- .desc("miss rate for demand accesses")
- .flags(total)
- ;
- demandMissRate = demandMisses / demandAccesses;
-
- overallMissRate
- .name(name() + ".overall_miss_rate")
- .desc("miss rate for overall accesses")
- .flags(total)
- ;
- overallMissRate = overallMisses / overallAccesses;
-
- // miss latency formulas
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- avgMissLatency[access_idx]
- .name(name() + "." + cstr + "_avg_miss_latency")
- .desc("average " + cstr + " miss latency")
- .flags(total | nozero | nonan)
- ;
-
- avgMissLatency[access_idx] =
- missLatency[access_idx] / misses[access_idx];
- }
-
- demandAvgMissLatency
- .name(name() + ".demand_avg_miss_latency")
- .desc("average overall miss latency")
- .flags(total)
- ;
- demandAvgMissLatency = demandMissLatency / demandMisses;
-
- overallAvgMissLatency
- .name(name() + ".overall_avg_miss_latency")
- .desc("average overall miss latency")
- .flags(total)
- ;
- overallAvgMissLatency = overallMissLatency / overallMisses;
-
- blocked_cycles.init(NUM_BLOCKED_CAUSES);
- blocked_cycles
- .name(name() + ".blocked_cycles")
- .desc("number of cycles access was blocked")
- .subname(Blocked_NoMSHRs, "no_mshrs")
- .subname(Blocked_NoTargets, "no_targets")
- ;
-
-
- blocked_causes.init(NUM_BLOCKED_CAUSES);
- blocked_causes
- .name(name() + ".blocked")
- .desc("number of cycles access was blocked")
- .subname(Blocked_NoMSHRs, "no_mshrs")
- .subname(Blocked_NoTargets, "no_targets")
- ;
-
- avg_blocked
- .name(name() + ".avg_blocked_cycles")
- .desc("average number of cycles each access was blocked")
- .subname(Blocked_NoMSHRs, "no_mshrs")
- .subname(Blocked_NoTargets, "no_targets")
- ;
-
- avg_blocked = blocked_cycles / blocked_causes;
-
- fastWrites
- .name(name() + ".fast_writes")
- .desc("number of fast writes performed")
- ;
-
- cacheCopies
- .name(name() + ".cache_copies")
- .desc("number of cache copies performed")
- ;
-
- writebacks
- .init(maxThreadsPerCPU)
- .name(name() + ".writebacks")
- .desc("number of writebacks")
- .flags(total)
- ;
-
- // MSHR statistics
- // MSHR hit statistics
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- mshr_hits[access_idx]
- .init(maxThreadsPerCPU)
- .name(name() + "." + cstr + "_mshr_hits")
- .desc("number of " + cstr + " MSHR hits")
- .flags(total | nozero | nonan)
- ;
- }
-
- demandMshrHits
- .name(name() + ".demand_mshr_hits")
- .desc("number of demand (read+write) MSHR hits")
- .flags(total)
- ;
- demandMshrHits = SUM_DEMAND(mshr_hits);
-
- overallMshrHits
- .name(name() + ".overall_mshr_hits")
- .desc("number of overall MSHR hits")
- .flags(total)
- ;
- overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
-
- // MSHR miss statistics
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- mshr_misses[access_idx]
- .init(maxThreadsPerCPU)
- .name(name() + "." + cstr + "_mshr_misses")
- .desc("number of " + cstr + " MSHR misses")
- .flags(total | nozero | nonan)
- ;
- }
-
- demandMshrMisses
- .name(name() + ".demand_mshr_misses")
- .desc("number of demand (read+write) MSHR misses")
- .flags(total)
- ;
- demandMshrMisses = SUM_DEMAND(mshr_misses);
-
- overallMshrMisses
- .name(name() + ".overall_mshr_misses")
- .desc("number of overall MSHR misses")
- .flags(total)
- ;
- overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
-
- // MSHR miss latency statistics
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- mshr_miss_latency[access_idx]
- .init(maxThreadsPerCPU)
- .name(name() + "." + cstr + "_mshr_miss_latency")
- .desc("number of " + cstr + " MSHR miss cycles")
- .flags(total | nozero | nonan)
- ;
- }
-
- demandMshrMissLatency
- .name(name() + ".demand_mshr_miss_latency")
- .desc("number of demand (read+write) MSHR miss cycles")
- .flags(total)
- ;
- demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
-
- overallMshrMissLatency
- .name(name() + ".overall_mshr_miss_latency")
- .desc("number of overall MSHR miss cycles")
- .flags(total)
- ;
- overallMshrMissLatency =
- demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
-
- // MSHR uncacheable statistics
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- mshr_uncacheable[access_idx]
- .init(maxThreadsPerCPU)
- .name(name() + "." + cstr + "_mshr_uncacheable")
- .desc("number of " + cstr + " MSHR uncacheable")
- .flags(total | nozero | nonan)
- ;
- }
-
- overallMshrUncacheable
- .name(name() + ".overall_mshr_uncacheable_misses")
- .desc("number of overall MSHR uncacheable misses")
- .flags(total)
- ;
- overallMshrUncacheable =
- SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
-
- // MSHR miss latency statistics
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- mshr_uncacheable_lat[access_idx]
- .init(maxThreadsPerCPU)
- .name(name() + "." + cstr + "_mshr_uncacheable_latency")
- .desc("number of " + cstr + " MSHR uncacheable cycles")
- .flags(total | nozero | nonan)
- ;
- }
-
- overallMshrUncacheableLatency
- .name(name() + ".overall_mshr_uncacheable_latency")
- .desc("number of overall MSHR uncacheable cycles")
- .flags(total)
- ;
- overallMshrUncacheableLatency =
- SUM_DEMAND(mshr_uncacheable_lat) +
- SUM_NON_DEMAND(mshr_uncacheable_lat);
-
-#if 0
- // MSHR access formulas
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- mshrAccesses[access_idx]
- .name(name() + "." + cstr + "_mshr_accesses")
- .desc("number of " + cstr + " mshr accesses(hits+misses)")
- .flags(total | nozero | nonan)
- ;
- mshrAccesses[access_idx] =
- mshr_hits[access_idx] + mshr_misses[access_idx]
- + mshr_uncacheable[access_idx];
- }
-
- demandMshrAccesses
- .name(name() + ".demand_mshr_accesses")
- .desc("number of demand (read+write) mshr accesses")
- .flags(total | nozero | nonan)
- ;
- demandMshrAccesses = demandMshrHits + demandMshrMisses;
-
- overallMshrAccesses
- .name(name() + ".overall_mshr_accesses")
- .desc("number of overall (read+write) mshr accesses")
- .flags(total | nozero | nonan)
- ;
- overallMshrAccesses = overallMshrHits + overallMshrMisses
- + overallMshrUncacheable;
-#endif
-
- // MSHR miss rate formulas
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- mshrMissRate[access_idx]
- .name(name() + "." + cstr + "_mshr_miss_rate")
- .desc("mshr miss rate for " + cstr + " accesses")
- .flags(total | nozero | nonan)
- ;
-
- mshrMissRate[access_idx] =
- mshr_misses[access_idx] / accesses[access_idx];
- }
-
- demandMshrMissRate
- .name(name() + ".demand_mshr_miss_rate")
- .desc("mshr miss rate for demand accesses")
- .flags(total)
- ;
- demandMshrMissRate = demandMshrMisses / demandAccesses;
-
- overallMshrMissRate
- .name(name() + ".overall_mshr_miss_rate")
- .desc("mshr miss rate for overall accesses")
- .flags(total)
- ;
- overallMshrMissRate = overallMshrMisses / overallAccesses;
-
- // mshrMiss latency formulas
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- avgMshrMissLatency[access_idx]
- .name(name() + "." + cstr + "_avg_mshr_miss_latency")
- .desc("average " + cstr + " mshr miss latency")
- .flags(total | nozero | nonan)
- ;
-
- avgMshrMissLatency[access_idx] =
- mshr_miss_latency[access_idx] / mshr_misses[access_idx];
- }
-
- demandAvgMshrMissLatency
- .name(name() + ".demand_avg_mshr_miss_latency")
- .desc("average overall mshr miss latency")
- .flags(total)
- ;
- demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
-
- overallAvgMshrMissLatency
- .name(name() + ".overall_avg_mshr_miss_latency")
- .desc("average overall mshr miss latency")
- .flags(total)
- ;
- overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
-
- // mshrUncacheable latency formulas
- for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
- MemCmd cmd(access_idx);
- const string &cstr = cmd.toString();
-
- avgMshrUncacheableLatency[access_idx]
- .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
- .desc("average " + cstr + " mshr uncacheable latency")
- .flags(total | nozero | nonan)
- ;
-
- avgMshrUncacheableLatency[access_idx] =
- mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
- }
-
- overallAvgMshrUncacheableLatency
- .name(name() + ".overall_avg_mshr_uncacheable_latency")
- .desc("average overall mshr uncacheable latency")
- .flags(total)
- ;
- overallAvgMshrUncacheableLatency = overallMshrUncacheableLatency / overallMshrUncacheable;
-
- mshr_cap_events
- .init(maxThreadsPerCPU)
- .name(name() + ".mshr_cap_events")
- .desc("number of times MSHR cap was activated")
- .flags(total)
- ;
-
- //software prefetching stats
- soft_prefetch_mshr_full
- .init(maxThreadsPerCPU)
- .name(name() + ".soft_prefetch_mshr_full")
- .desc("number of mshr full events for SW prefetching instrutions")
- .flags(total)
- ;
-
- mshr_no_allocate_misses
- .name(name() +".no_allocate_misses")
- .desc("Number of misses that were no-allocate")
- ;
-
-}
-
-unsigned int
-BaseCache::drain(Event *de)
-{
- int count = memSidePort->drain(de) + cpuSidePort->drain(de);
-
- // Set status
- if (count != 0) {
- drainEvent = de;
-
- changeState(SimObject::Draining);
- return count;
- }
-
- changeState(SimObject::Drained);
- return 0;
-}
+++ /dev/null
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- * Steve Reinhardt
- * Ron Dreslinski
- */
-
-/**
- * @file
- * Declares a basic cache interface BaseCache.
- */
-
-#ifndef __BASE_CACHE_HH__
-#define __BASE_CACHE_HH__
-
-#include <vector>
-#include <string>
-#include <list>
-#include <algorithm>
-#include <inttypes.h>
-
-#include "base/misc.hh"
-#include "base/statistics.hh"
-#include "base/trace.hh"
-#include "mem/cache/miss/mshr_queue.hh"
-#include "mem/mem_object.hh"
-#include "mem/packet.hh"
-#include "mem/tport.hh"
-#include "mem/request.hh"
-#include "params/BaseCache.hh"
-#include "sim/eventq.hh"
-#include "sim/sim_exit.hh"
-
-class MSHR;
-/**
- * A basic cache interface. Implements some common functions for speed.
- */
-class BaseCache : public MemObject
-{
- /**
- * Indexes to enumerate the MSHR queues.
- */
- enum MSHRQueueIndex {
- MSHRQueue_MSHRs,
- MSHRQueue_WriteBuffer
- };
-
- /**
- * Reasons for caches to be blocked.
- */
- enum BlockedCause {
- Blocked_NoMSHRs = MSHRQueue_MSHRs,
- Blocked_NoWBBuffers = MSHRQueue_WriteBuffer,
- Blocked_NoTargets,
- NUM_BLOCKED_CAUSES
- };
-
- public:
- /**
- * Reasons for cache to request a bus.
- */
- enum RequestCause {
- Request_MSHR = MSHRQueue_MSHRs,
- Request_WB = MSHRQueue_WriteBuffer,
- Request_PF,
- NUM_REQUEST_CAUSES
- };
-
- private:
-
- class CachePort : public SimpleTimingPort
- {
- public:
- BaseCache *cache;
-
- protected:
- CachePort(const std::string &_name, BaseCache *_cache,
- const std::string &_label,
- std::vector<Range<Addr> > filter_ranges);
-
- virtual void recvStatusChange(Status status);
-
- virtual int deviceBlockSize();
-
- bool recvRetryCommon();
-
- typedef EventWrapper<Port, &Port::sendRetry>
- SendRetryEvent;
-
- const std::string label;
-
- public:
- void setOtherPort(CachePort *_otherPort) { otherPort = _otherPort; }
-
- void setBlocked();
-
- void clearBlocked();
-
- bool checkFunctional(PacketPtr pkt);
-
- CachePort *otherPort;
-
- bool blocked;
-
- bool mustSendRetry;
-
- /** filter ranges */
- std::vector<Range<Addr> > filterRanges;
-
- void requestBus(RequestCause cause, Tick time)
- {
- DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
- if (!waitingOnRetry) {
- schedSendEvent(time);
- }
- }
-
- void respond(PacketPtr pkt, Tick time) {
- schedSendTiming(pkt, time);
- }
- };
-
- public: //Made public so coherence can get at it.
- CachePort *cpuSidePort;
- CachePort *memSidePort;
-
- protected:
-
- /** Miss status registers */
- MSHRQueue mshrQueue;
-
- /** Write/writeback buffer */
- MSHRQueue writeBuffer;
-
- MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
- PacketPtr pkt, Tick time, bool requestBus)
- {
- MSHR *mshr = mq->allocate(addr, size, pkt, time, order++);
-
- if (mq->isFull()) {
- setBlocked((BlockedCause)mq->index);
- }
-
- if (requestBus) {
- requestMemSideBus((RequestCause)mq->index, time);
- }
-
- return mshr;
- }
-
- void markInServiceInternal(MSHR *mshr)
- {
- MSHRQueue *mq = mshr->queue;
- bool wasFull = mq->isFull();
- mq->markInService(mshr);
- if (wasFull && !mq->isFull()) {
- clearBlocked((BlockedCause)mq->index);
- }
- }
-
- /** Block size of this cache */
- const int blkSize;
-
- /**
- * The latency of a hit in this device.
- */
- int hitLatency;
-
- /** The number of targets for each MSHR. */
- const int numTarget;
-
- /** Increasing order number assigned to each incoming request. */
- uint64_t order;
-
- /**
- * Bit vector of the blocking reasons for the access path.
- * @sa #BlockedCause
- */
- uint8_t blocked;
-
- /** Stores time the cache blocked for statistics. */
- Tick blockedCycle;
-
- /** Pointer to the MSHR that has no targets. */
- MSHR *noTargetMSHR;
-
- /** The number of misses to trigger an exit event. */
- Counter missCount;
-
- /** The drain event. */
- Event *drainEvent;
-
- public:
- // Statistics
- /**
- * @addtogroup CacheStatistics
- * @{
- */
-
- /** Number of hits per thread for each type of command. @sa Packet::Command */
- Stats::Vector<> hits[MemCmd::NUM_MEM_CMDS];
- /** Number of hits for demand accesses. */
- Stats::Formula demandHits;
- /** Number of hit for all accesses. */
- Stats::Formula overallHits;
-
- /** Number of misses per thread for each type of command. @sa Packet::Command */
- Stats::Vector<> misses[MemCmd::NUM_MEM_CMDS];
- /** Number of misses for demand accesses. */
- Stats::Formula demandMisses;
- /** Number of misses for all accesses. */
- Stats::Formula overallMisses;
-
- /**
- * Total number of cycles per thread/command spent waiting for a miss.
- * Used to calculate the average miss latency.
- */
- Stats::Vector<> missLatency[MemCmd::NUM_MEM_CMDS];
- /** Total number of cycles spent waiting for demand misses. */
- Stats::Formula demandMissLatency;
- /** Total number of cycles spent waiting for all misses. */
- Stats::Formula overallMissLatency;
-
- /** The number of accesses per command and thread. */
- Stats::Formula accesses[MemCmd::NUM_MEM_CMDS];
- /** The number of demand accesses. */
- Stats::Formula demandAccesses;
- /** The number of overall accesses. */
- Stats::Formula overallAccesses;
-
- /** The miss rate per command and thread. */
- Stats::Formula missRate[MemCmd::NUM_MEM_CMDS];
- /** The miss rate of all demand accesses. */
- Stats::Formula demandMissRate;
- /** The miss rate for all accesses. */
- Stats::Formula overallMissRate;
-
- /** The average miss latency per command and thread. */
- Stats::Formula avgMissLatency[MemCmd::NUM_MEM_CMDS];
- /** The average miss latency for demand misses. */
- Stats::Formula demandAvgMissLatency;
- /** The average miss latency for all misses. */
- Stats::Formula overallAvgMissLatency;
-
- /** The total number of cycles blocked for each blocked cause. */
- Stats::Vector<> blocked_cycles;
- /** The number of times this cache blocked for each blocked cause. */
- Stats::Vector<> blocked_causes;
-
- /** The average number of cycles blocked for each blocked cause. */
- Stats::Formula avg_blocked;
-
- /** The number of fast writes (WH64) performed. */
- Stats::Scalar<> fastWrites;
-
- /** The number of cache copies performed. */
- Stats::Scalar<> cacheCopies;
-
- /** Number of blocks written back per thread. */
- Stats::Vector<> writebacks;
-
- /** Number of misses that hit in the MSHRs per command and thread. */
- Stats::Vector<> mshr_hits[MemCmd::NUM_MEM_CMDS];
- /** Demand misses that hit in the MSHRs. */
- Stats::Formula demandMshrHits;
- /** Total number of misses that hit in the MSHRs. */
- Stats::Formula overallMshrHits;
-
- /** Number of misses that miss in the MSHRs, per command and thread. */
- Stats::Vector<> mshr_misses[MemCmd::NUM_MEM_CMDS];
- /** Demand misses that miss in the MSHRs. */
- Stats::Formula demandMshrMisses;
- /** Total number of misses that miss in the MSHRs. */
- Stats::Formula overallMshrMisses;
-
- /** Number of misses that miss in the MSHRs, per command and thread. */
- Stats::Vector<> mshr_uncacheable[MemCmd::NUM_MEM_CMDS];
- /** Total number of misses that miss in the MSHRs. */
- Stats::Formula overallMshrUncacheable;
-
- /** Total cycle latency of each MSHR miss, per command and thread. */
- Stats::Vector<> mshr_miss_latency[MemCmd::NUM_MEM_CMDS];
- /** Total cycle latency of demand MSHR misses. */
- Stats::Formula demandMshrMissLatency;
- /** Total cycle latency of overall MSHR misses. */
- Stats::Formula overallMshrMissLatency;
-
- /** Total cycle latency of each MSHR miss, per command and thread. */
- Stats::Vector<> mshr_uncacheable_lat[MemCmd::NUM_MEM_CMDS];
- /** Total cycle latency of overall MSHR misses. */
- Stats::Formula overallMshrUncacheableLatency;
-
- /** The total number of MSHR accesses per command and thread. */
- Stats::Formula mshrAccesses[MemCmd::NUM_MEM_CMDS];
- /** The total number of demand MSHR accesses. */
- Stats::Formula demandMshrAccesses;
- /** The total number of MSHR accesses. */
- Stats::Formula overallMshrAccesses;
-
- /** The miss rate in the MSHRs pre command and thread. */
- Stats::Formula mshrMissRate[MemCmd::NUM_MEM_CMDS];
- /** The demand miss rate in the MSHRs. */
- Stats::Formula demandMshrMissRate;
- /** The overall miss rate in the MSHRs. */
- Stats::Formula overallMshrMissRate;
-
- /** The average latency of an MSHR miss, per command and thread. */
- Stats::Formula avgMshrMissLatency[MemCmd::NUM_MEM_CMDS];
- /** The average latency of a demand MSHR miss. */
- Stats::Formula demandAvgMshrMissLatency;
- /** The average overall latency of an MSHR miss. */
- Stats::Formula overallAvgMshrMissLatency;
-
- /** The average latency of an MSHR miss, per command and thread. */
- Stats::Formula avgMshrUncacheableLatency[MemCmd::NUM_MEM_CMDS];
- /** The average overall latency of an MSHR miss. */
- Stats::Formula overallAvgMshrUncacheableLatency;
-
- /** The number of times a thread hit its MSHR cap. */
- Stats::Vector<> mshr_cap_events;
- /** The number of times software prefetches caused the MSHR to block. */
- Stats::Vector<> soft_prefetch_mshr_full;
-
- Stats::Scalar<> mshr_no_allocate_misses;
-
- /**
- * @}
- */
-
- /**
- * Register stats for this object.
- */
- virtual void regStats();
-
- public:
- typedef BaseCacheParams Params;
- BaseCache(const Params *p);
- ~BaseCache() {}
-
- virtual void init();
-
- /**
- * Query block size of a cache.
- * @return The block size
- */
- int getBlockSize() const
- {
- return blkSize;
- }
-
-
- Addr blockAlign(Addr addr) const { return (addr & ~(blkSize - 1)); }
-
-
- MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
- {
- assert(!pkt->req->isUncacheable());
- return allocateBufferInternal(&mshrQueue,
- blockAlign(pkt->getAddr()), blkSize,
- pkt, time, requestBus);
- }
-
- MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
- {
- assert(pkt->isWrite() && !pkt->isRead());
- return allocateBufferInternal(&writeBuffer,
- pkt->getAddr(), pkt->getSize(),
- pkt, time, requestBus);
- }
-
- MSHR *allocateUncachedReadBuffer(PacketPtr pkt, Tick time, bool requestBus)
- {
- assert(pkt->req->isUncacheable());
- assert(pkt->isRead());
- return allocateBufferInternal(&mshrQueue,
- pkt->getAddr(), pkt->getSize(),
- pkt, time, requestBus);
- }
-
- /**
- * Returns true if the cache is blocked for accesses.
- */
- bool isBlocked()
- {
- return blocked != 0;
- }
-
- /**
- * Marks the access path of the cache as blocked for the given cause. This
- * also sets the blocked flag in the slave interface.
- * @param cause The reason for the cache blocking.
- */
- void setBlocked(BlockedCause cause)
- {
- uint8_t flag = 1 << cause;
- if (blocked == 0) {
- blocked_causes[cause]++;
- blockedCycle = curTick;
- cpuSidePort->setBlocked();
- }
- blocked |= flag;
- DPRINTF(Cache,"Blocking for cause %d, mask=%d\n", cause, blocked);
- }
-
- /**
- * Marks the cache as unblocked for the given cause. This also clears the
- * blocked flags in the appropriate interfaces.
- * @param cause The newly unblocked cause.
- * @warning Calling this function can cause a blocked request on the bus to
- * access the cache. The cache must be in a state to handle that request.
- */
- void clearBlocked(BlockedCause cause)
- {
- uint8_t flag = 1 << cause;
- blocked &= ~flag;
- DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
- if (blocked == 0) {
- blocked_cycles[cause] += curTick - blockedCycle;
- cpuSidePort->clearBlocked();
- }
- }
-
- Tick nextMSHRReadyTime()
- {
- return std::min(mshrQueue.nextMSHRReadyTime(),
- writeBuffer.nextMSHRReadyTime());
- }
-
- /**
- * Request the master bus for the given cause and time.
- * @param cause The reason for the request.
- * @param time The time to make the request.
- */
- void requestMemSideBus(RequestCause cause, Tick time)
- {
- memSidePort->requestBus(cause, time);
- }
-
- /**
- * Clear the master bus request for the given cause.
- * @param cause The request reason to clear.
- */
- void deassertMemSideBusRequest(RequestCause cause)
- {
- // obsolete!!
- assert(false);
- // memSidePort->deassertBusRequest(cause);
- // checkDrain();
- }
-
- virtual unsigned int drain(Event *de);
-
- virtual bool inCache(Addr addr) = 0;
-
- virtual bool inMissQueue(Addr addr) = 0;
-
- void incMissCount(PacketPtr pkt)
- {
- misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
-
- if (missCount) {
- --missCount;
- if (missCount == 0)
- exitSimLoop("A cache reached the maximum miss count");
- }
- }
-
-};
-
-#endif //__BASE_CACHE_HH__
--- /dev/null
+/*
+ * Copyright (c) 2007 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "base/cprintf.hh"
+#include "mem/cache/cache_blk.hh"
+
+void
+CacheBlkPrintWrapper::print(std::ostream &os, int verbosity,
+ const std::string &prefix) const
+{
+ ccprintf(os, "%sblk %c%c%c\n", prefix,
+ blk->isValid() ? 'V' : '-',
+ blk->isWritable() ? 'E' : '-',
+ blk->isDirty() ? 'M' : '-');
+}
+
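+// Illustrative usage (a sketch added here as documentation; 'blk' is assumed
+// to be a valid CacheBlk pointer):
+//
+//     CacheBlkPrintWrapper wrapper(blk);
+//     wrapper.print(std::cout);   // prints e.g. "blk V--" for a valid,
+//                                 // clean, read-only copy and "blk VEM"
+//                                 // for a valid, writable, dirty one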
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/** @file
+ * Definitions of a simple cache block class.
+ */
+
+#ifndef __CACHE_BLK_HH__
+#define __CACHE_BLK_HH__
+
+#include <list>
+
+#include "base/printable.hh"
+#include "sim/core.hh" // for Tick
+#include "arch/isa_traits.hh" // for Addr
+#include "mem/packet.hh"
+#include "mem/request.hh"
+
+/**
+ * Cache block status bit assignments
+ */
+enum CacheBlkStatusBits {
+ /** valid, readable */
+ BlkValid = 0x01,
+ /** write permission */
+ BlkWritable = 0x02,
+ /** dirty (modified) */
+ BlkDirty = 0x04,
+ /** block was referenced */
+ BlkReferenced = 0x10,
+    /** block was a hardware prefetch yet unaccessed */
+ BlkHWPrefetched = 0x20
+};
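+
+// Taken together, the flags map loosely onto familiar coherence-style
+// states (an illustrative reading only; the cache model that sets them
+// defines the actual protocol):
+//   BlkValid                           -> valid, read-only (shared) copy
+//   BlkValid | BlkWritable             -> writable but clean (exclusive) copy
+//   BlkValid | BlkWritable | BlkDirty  -> writable and modified copy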
+
+/**
+ * A Basic Cache block.
+ * Contains the tag, status, and a pointer to data.
+ */
+class CacheBlk
+{
+ public:
+ /** The address space ID of this block. */
+ int asid;
+ /** Data block tag value. */
+ Addr tag;
+ /**
+ * Contains a copy of the data in this block for easy access. This is used
+     * for efficient execution when the data could actually be stored in
+     * another format (COW, compressed, sub-blocked, etc.). In all cases the
+     * data stored here should be kept consistent with the actual data
+ * referenced by this block.
+ */
+ uint8_t *data;
+    /** The number of bytes stored in this block. */
+ int size;
+
+ /** block state: OR of CacheBlkStatusBit */
+ typedef unsigned State;
+
+    /** The current status of this block. @sa CacheBlkStatusBits */
+ State status;
+
+    /** The tick (curTick) at which this block becomes accessible. */
+ Tick whenReady;
+
+ /**
+ * The set this block belongs to.
+ * @todo Move this into subclasses when we fix CacheTags to use them.
+ */
+ int set;
+
+ /** Number of references to this block since it was brought in. */
+ int refCount;
+
+ protected:
+ /**
+ * Represents that the indicated thread context has a "lock" on
+ * the block, in the LL/SC sense.
+ */
+ class Lock {
+ public:
+ int cpuNum; // locking CPU
+ int threadNum; // locking thread ID within CPU
+
+ // check for matching execution context
+ bool matchesContext(Request *req)
+ {
+ return (cpuNum == req->getCpuNum() &&
+ threadNum == req->getThreadNum());
+ }
+
+ Lock(Request *req)
+ : cpuNum(req->getCpuNum()), threadNum(req->getThreadNum())
+ {
+ }
+ };
+
+ /** List of thread contexts that have performed a load-locked (LL)
+ * on the block since the last store. */
+ std::list<Lock> lockList;
+
+ public:
+
+ CacheBlk()
+ : asid(-1), tag(0), data(0) ,size(0), status(0), whenReady(0),
+ set(-1), refCount(0)
+ {}
+
+ /**
+ * Copy the state of the given block into this one.
+ * @param rhs The block to copy.
+ * @return a const reference to this block.
+ */
+ const CacheBlk& operator=(const CacheBlk& rhs)
+ {
+ asid = rhs.asid;
+ tag = rhs.tag;
+ data = rhs.data;
+ size = rhs.size;
+ status = rhs.status;
+ whenReady = rhs.whenReady;
+ set = rhs.set;
+ refCount = rhs.refCount;
+ return *this;
+ }
+
+ /**
+ * Checks the write permissions of this block.
+ * @return True if the block is writable.
+ */
+ bool isWritable() const
+ {
+ const int needed_bits = BlkWritable | BlkValid;
+ return (status & needed_bits) == needed_bits;
+ }
+
+ /**
+ * Checks that a block is valid (readable).
+ * @return True if the block is valid.
+ */
+ bool isValid() const
+ {
+ return (status & BlkValid) != 0;
+ }
+
+ /**
+ * Check to see if a block has been written.
+ * @return True if the block is dirty.
+ */
+ bool isDirty() const
+ {
+ return (status & BlkDirty) != 0;
+ }
+
+ /**
+ * Check if this block has been referenced.
+ * @return True if the block has been referenced.
+ */
+ bool isReferenced() const
+ {
+ return (status & BlkReferenced) != 0;
+ }
+
+ /**
+ * Check if this block was the result of a hardware prefetch, yet to
+ * be touched.
+     * @return True if the block was a hardware prefetch and is still unaccessed.
+ */
+ bool isPrefetch() const
+ {
+ return (status & BlkHWPrefetched) != 0;
+ }
+
+ /**
+     * Track the fact that a load-locked (LL) was issued to the block. If
+ * multiple LLs get issued from the same context we could have
+ * redundant records on the list, but that's OK, as they'll all
+ * get blown away at the next store.
+ */
+ void trackLoadLocked(PacketPtr pkt)
+ {
+ assert(pkt->isLocked());
+ lockList.push_front(Lock(pkt->req));
+ }
+
+ /**
+ * Clear the list of valid load locks. Should be called whenever
+     * the block is written to or invalidated.
+ */
+ void clearLoadLocks() { lockList.clear(); }
+
+ /**
+ * Handle interaction of load-locked operations and stores.
+ * @return True if write should proceed, false otherwise. Returns
+ * false only in the case of a failed store conditional.
+ */
+ bool checkWrite(PacketPtr pkt)
+ {
+ Request *req = pkt->req;
+ if (pkt->isLocked()) {
+ // it's a store conditional... have to check for matching
+ // load locked.
+ bool success = false;
+
+ for (std::list<Lock>::iterator i = lockList.begin();
+ i != lockList.end(); ++i)
+ {
+ if (i->matchesContext(req)) {
+ // it's a store conditional, and as far as the memory
+ // system can tell, the requesting context's lock is
+ // still valid.
+ success = true;
+ break;
+ }
+ }
+
+ req->setExtraData(success ? 1 : 0);
+ clearLoadLocks();
+ return success;
+ } else {
+ // for *all* stores (conditional or otherwise) we have to
+ // clear the list of load-locks as they're all invalid now.
+ clearLoadLocks();
+ return true;
+ }
+ }
+};
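+
+// Sketch of the intended LL/SC call sequence (illustrative only; the actual
+// call sites live in the cache model, and blk/pkt stand for the block and
+// packet being accessed):
+//
+//     if (pkt->isRead() && pkt->isLocked())
+//         blk->trackLoadLocked(pkt);       // remember the LL context
+//
+//     if (pkt->isWrite() && !blk->checkWrite(pkt)) {
+//         // store conditional failed: checkWrite() has set the request's
+//         // extra data to 0 and the block must not be modified
+//     }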
+
+/**
+ * Simple class to provide virtual print() method on cache blocks
+ * without allocating a vtable pointer for every single cache block.
+ * Just wrap the CacheBlk object in an instance of this before passing
+ * to a function that requires a Printable object.
+ */
+class CacheBlkPrintWrapper : public Printable
+{
+ CacheBlk *blk;
+ public:
+ CacheBlkPrintWrapper(CacheBlk *_blk) : blk(_blk) {}
+ virtual ~CacheBlkPrintWrapper() {}
+ void print(std::ostream &o, int verbosity = 0,
+ const std::string &prefix = "") const;
+};
+
+
+
+#endif //__CACHE_BLK_HH__
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ * Nathan Binkert
+ */
+
+/**
+ * @file
+ * SimObject instantiation of caches.
+ */
+#include <vector>
+
+// Must be included first to determine which caches we want
+#include "enums/Prefetch.hh"
+#include "mem/config/cache.hh"
+#include "mem/config/prefetch.hh"
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/cache.hh"
+#include "mem/bus.hh"
+#include "params/BaseCache.hh"
+
+// Tag Templates
+#if defined(USE_CACHE_LRU)
+#include "mem/cache/tags/lru.hh"
+#endif
+
+#if defined(USE_CACHE_FALRU)
+#include "mem/cache/tags/fa_lru.hh"
+#endif
+
+#if defined(USE_CACHE_IIC)
+#include "mem/cache/tags/iic.hh"
+#endif
+
+#if defined(USE_CACHE_SPLIT)
+#include "mem/cache/tags/split.hh"
+#endif
+
+#if defined(USE_CACHE_SPLIT_LIFO)
+#include "mem/cache/tags/split_lifo.hh"
+#endif
+
+//Prefetcher Headers
+#if defined(USE_GHB)
+#include "mem/cache/prefetch/ghb_prefetcher.hh"
+#endif
+#if defined(USE_TAGGED)
+#include "mem/cache/prefetch/tagged_prefetcher.hh"
+#endif
+#if defined(USE_STRIDED)
+#include "mem/cache/prefetch/stride_prefetcher.hh"
+#endif
+
+
+using namespace std;
+using namespace TheISA;
+
+#define BUILD_CACHE(TAGS, tags) \
+ do { \
+ BasePrefetcher *pf; \
+ if (prefetch_policy == Enums::tagged) { \
+ BUILD_TAGGED_PREFETCHER(TAGS); \
+ } \
+ else if (prefetch_policy == Enums::stride) { \
+ BUILD_STRIDED_PREFETCHER(TAGS); \
+ } \
+ else if (prefetch_policy == Enums::ghb) { \
+ BUILD_GHB_PREFETCHER(TAGS); \
+ } \
+ else { \
+ BUILD_NULL_PREFETCHER(TAGS); \
+ } \
+ Cache<TAGS> *retval = \
+ new Cache<TAGS>(this, tags, pf); \
+ return retval; \
+ } while (0)
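+
+// For example, BUILD_CACHE(LRU, tags) picks a prefetcher according to
+// prefetch_policy and returns a newly constructed Cache<LRU> built from this
+// parameter object and the given tag store.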
+
+#define BUILD_CACHE_PANIC(x) do { \
+ panic("%s not compiled into M5", x); \
+ } while (0)
+
+#if defined(USE_CACHE_FALRU)
+#define BUILD_FALRU_CACHE do { \
+ FALRU *tags = new FALRU(block_size, size, latency); \
+ BUILD_CACHE(FALRU, tags); \
+ } while (0)
+#else
+#define BUILD_FALRU_CACHE BUILD_CACHE_PANIC("falru cache")
+#endif
+
+#if defined(USE_CACHE_LRU)
+#define BUILD_LRU_CACHE do { \
+ LRU *tags = new LRU(numSets, block_size, assoc, latency); \
+ BUILD_CACHE(LRU, tags); \
+ } while (0)
+#else
+#define BUILD_LRU_CACHE BUILD_CACHE_PANIC("lru cache")
+#endif
+
+#if defined(USE_CACHE_SPLIT)
+#define BUILD_SPLIT_CACHE do { \
+ Split *tags = new Split(numSets, block_size, assoc, split_size, lifo, \
+ two_queue, latency); \
+ BUILD_CACHE(Split, tags); \
+ } while (0)
+#else
+#define BUILD_SPLIT_CACHE BUILD_CACHE_PANIC("split cache")
+#endif
+
+#if defined(USE_CACHE_SPLIT_LIFO)
+#define BUILD_SPLIT_LIFO_CACHE do { \
+ SplitLIFO *tags = new SplitLIFO(block_size, size, assoc, \
+ latency, two_queue, -1); \
+ BUILD_CACHE(SplitLIFO, tags); \
+ } while (0)
+#else
+#define BUILD_SPLIT_LIFO_CACHE BUILD_CACHE_PANIC("lifo cache")
+#endif
+
+#if defined(USE_CACHE_IIC)
+#define BUILD_IIC_CACHE do { \
+ IIC *tags = new IIC(iic_params); \
+ BUILD_CACHE(IIC, tags); \
+ } while (0)
+#else
+#define BUILD_IIC_CACHE BUILD_CACHE_PANIC("iic")
+#endif
+
+#define BUILD_CACHES do { \
+ if (repl == NULL) { \
+ if (numSets == 1) { \
+ BUILD_FALRU_CACHE; \
+ } else { \
+ if (split == true) { \
+ BUILD_SPLIT_CACHE; \
+ } else if (lifo == true) { \
+ BUILD_SPLIT_LIFO_CACHE; \
+ } else { \
+ BUILD_LRU_CACHE; \
+ } \
+ } \
+ } else { \
+ BUILD_IIC_CACHE; \
+ } \
+ } while (0)
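+
+// Selection order implemented above: an explicit replacement policy (repl)
+// always selects the IIC tags; otherwise a single-set (fully associative)
+// configuration uses FALRU, and set-associative configurations use Split,
+// SplitLIFO or plain LRU tags depending on the split and lifo flags.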
+
+#define BUILD_COHERENCE(b) do { \
+ } while (0)
+
+#if defined(USE_TAGGED)
+#define BUILD_TAGGED_PREFETCHER(t) \
+ pf = new TaggedPrefetcher(this)
+#else
+#define BUILD_TAGGED_PREFETCHER(t) BUILD_CACHE_PANIC("Tagged Prefetcher")
+#endif
+
+#if defined(USE_STRIDED)
+#define BUILD_STRIDED_PREFETCHER(t) \
+ pf = new StridePrefetcher(this)
+#else
+#define BUILD_STRIDED_PREFETCHER(t) BUILD_CACHE_PANIC("Stride Prefetcher")
+#endif
+
+#if defined(USE_GHB)
+#define BUILD_GHB_PREFETCHER(t) \
+ pf = new GHBPrefetcher(this)
+#else
+#define BUILD_GHB_PREFETCHER(t) BUILD_CACHE_PANIC("GHB Prefetcher")
+#endif
+
+#if defined(USE_TAGGED)
+#define BUILD_NULL_PREFETCHER(t) \
+ pf = new TaggedPrefetcher(this)
+#else
+#define BUILD_NULL_PREFETCHER(t) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)")
+#endif
+
+BaseCache *
+BaseCacheParams::create()
+{
+ int numSets = size / (assoc * block_size);
+ if (subblock_size == 0) {
+ subblock_size = block_size;
+ }
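+
+    // Worked example (for reference): a 64 kB cache with 64-byte blocks and
+    // 2-way associativity gives numSets = 65536 / (2 * 64) = 512, while a
+    // fully associative cache (assoc == size / block_size) gives numSets = 1
+    // and selects the FALRU tag store via BUILD_CACHES below.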
+
+    // Warnings about prefetcher policy
+ if (prefetch_policy == Enums::none) {
+ if (prefetch_miss || prefetch_access)
+ panic("With no prefetcher, you shouldn't prefetch from"
+ " either miss or access stream\n");
+ }
+
+ if (prefetch_policy == Enums::tagged || prefetch_policy == Enums::stride ||
+ prefetch_policy == Enums::ghb) {
+
+ if (!prefetch_miss && !prefetch_access)
+ warn("With this prefetcher you should chose a prefetch"
+ " stream (miss or access)\nNo Prefetching will occur\n");
+
+ if (prefetch_miss && prefetch_access)
+ panic("Can't do prefetches from both miss and access stream");
+ }
+
+#if defined(USE_CACHE_IIC)
+ // Build IIC params
+ IIC::Params iic_params;
+ iic_params.size = size;
+ iic_params.numSets = numSets;
+ iic_params.blkSize = block_size;
+ iic_params.assoc = assoc;
+ iic_params.hashDelay = hash_delay;
+ iic_params.hitLatency = latency;
+ iic_params.rp = repl;
+ iic_params.subblockSize = subblock_size;
+#else
+ const void *repl = NULL;
+#endif
+
+ BUILD_CACHES;
+ return NULL;
+}
+++ /dev/null
-/*
- * Copyright (c) 2007 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "base/cprintf.hh"
-#include "mem/cache/cache_blk.hh"
-
-void
-CacheBlkPrintWrapper::print(std::ostream &os, int verbosity,
- const std::string &prefix) const
-{
- ccprintf(os, "%sblk %c%c%c\n", prefix,
- blk->isValid() ? 'V' : '-',
- blk->isWritable() ? 'E' : '-',
- blk->isDirty() ? 'M' : '-');
-}
-
+++ /dev/null
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- */
-
-/** @file
- * Definitions of a simple cache block class.
- */
-
-#ifndef __CACHE_BLK_HH__
-#define __CACHE_BLK_HH__
-
-#include <list>
-
-#include "base/printable.hh"
-#include "sim/core.hh" // for Tick
-#include "arch/isa_traits.hh" // for Addr
-#include "mem/packet.hh"
-#include "mem/request.hh"
-
-/**
- * Cache block status bit assignments
- */
-enum CacheBlkStatusBits {
- /** valid, readable */
- BlkValid = 0x01,
- /** write permission */
- BlkWritable = 0x02,
- /** dirty (modified) */
- BlkDirty = 0x04,
- /** block was referenced */
- BlkReferenced = 0x10,
- /** block was a hardware prefetch yet unaccessed*/
- BlkHWPrefetched = 0x20
-};
-
-/**
- * A Basic Cache block.
- * Contains the tag, status, and a pointer to data.
- */
-class CacheBlk
-{
- public:
- /** The address space ID of this block. */
- int asid;
- /** Data block tag value. */
- Addr tag;
- /**
- * Contains a copy of the data in this block for easy access. This is used
- * for efficient execution when the data could be actually stored in
- * another format (COW, compressed, sub-blocked, etc). In all cases the
- * data stored here should be kept consistant with the actual data
- * referenced by this block.
- */
- uint8_t *data;
- /** the number of bytes stored in this block. */
- int size;
-
- /** block state: OR of CacheBlkStatusBit */
- typedef unsigned State;
-
- /** The current status of this block. @sa CacheBlockStatusBits */
- State status;
-
- /** Which curTick will this block be accessable */
- Tick whenReady;
-
- /**
- * The set this block belongs to.
- * @todo Move this into subclasses when we fix CacheTags to use them.
- */
- int set;
-
- /** Number of references to this block since it was brought in. */
- int refCount;
-
- protected:
- /**
- * Represents that the indicated thread context has a "lock" on
- * the block, in the LL/SC sense.
- */
- class Lock {
- public:
- int cpuNum; // locking CPU
- int threadNum; // locking thread ID within CPU
-
- // check for matching execution context
- bool matchesContext(Request *req)
- {
- return (cpuNum == req->getCpuNum() &&
- threadNum == req->getThreadNum());
- }
-
- Lock(Request *req)
- : cpuNum(req->getCpuNum()), threadNum(req->getThreadNum())
- {
- }
- };
-
- /** List of thread contexts that have performed a load-locked (LL)
- * on the block since the last store. */
- std::list<Lock> lockList;
-
- public:
-
- CacheBlk()
- : asid(-1), tag(0), data(0) ,size(0), status(0), whenReady(0),
- set(-1), refCount(0)
- {}
-
- /**
- * Copy the state of the given block into this one.
- * @param rhs The block to copy.
- * @return a const reference to this block.
- */
- const CacheBlk& operator=(const CacheBlk& rhs)
- {
- asid = rhs.asid;
- tag = rhs.tag;
- data = rhs.data;
- size = rhs.size;
- status = rhs.status;
- whenReady = rhs.whenReady;
- set = rhs.set;
- refCount = rhs.refCount;
- return *this;
- }
-
- /**
- * Checks the write permissions of this block.
- * @return True if the block is writable.
- */
- bool isWritable() const
- {
- const int needed_bits = BlkWritable | BlkValid;
- return (status & needed_bits) == needed_bits;
- }
-
- /**
- * Checks that a block is valid (readable).
- * @return True if the block is valid.
- */
- bool isValid() const
- {
- return (status & BlkValid) != 0;
- }
-
- /**
- * Check to see if a block has been written.
- * @return True if the block is dirty.
- */
- bool isDirty() const
- {
- return (status & BlkDirty) != 0;
- }
-
- /**
- * Check if this block has been referenced.
- * @return True if the block has been referenced.
- */
- bool isReferenced() const
- {
- return (status & BlkReferenced) != 0;
- }
-
- /**
- * Check if this block was the result of a hardware prefetch, yet to
- * be touched.
- * @return True if the block was a hardware prefetch, unaccesed.
- */
- bool isPrefetch() const
- {
- return (status & BlkHWPrefetched) != 0;
- }
-
- /**
- * Track the fact that a local locked was issued to the block. If
- * multiple LLs get issued from the same context we could have
- * redundant records on the list, but that's OK, as they'll all
- * get blown away at the next store.
- */
- void trackLoadLocked(PacketPtr pkt)
- {
- assert(pkt->isLocked());
- lockList.push_front(Lock(pkt->req));
- }
-
- /**
- * Clear the list of valid load locks. Should be called whenever
- * block is written to or invalidated.
- */
- void clearLoadLocks() { lockList.clear(); }
-
- /**
- * Handle interaction of load-locked operations and stores.
- * @return True if write should proceed, false otherwise. Returns
- * false only in the case of a failed store conditional.
- */
- bool checkWrite(PacketPtr pkt)
- {
- Request *req = pkt->req;
- if (pkt->isLocked()) {
- // it's a store conditional... have to check for matching
- // load locked.
- bool success = false;
-
- for (std::list<Lock>::iterator i = lockList.begin();
- i != lockList.end(); ++i)
- {
- if (i->matchesContext(req)) {
- // it's a store conditional, and as far as the memory
- // system can tell, the requesting context's lock is
- // still valid.
- success = true;
- break;
- }
- }
-
- req->setExtraData(success ? 1 : 0);
- clearLoadLocks();
- return success;
- } else {
- // for *all* stores (conditional or otherwise) we have to
- // clear the list of load-locks as they're all invalid now.
- clearLoadLocks();
- return true;
- }
- }
-};
-
-/**
- * Simple class to provide virtual print() method on cache blocks
- * without allocating a vtable pointer for every single cache block.
- * Just wrap the CacheBlk object in an instance of this before passing
- * to a function that requires a Printable object.
- */
-class CacheBlkPrintWrapper : public Printable
-{
- CacheBlk *blk;
- public:
- CacheBlkPrintWrapper(CacheBlk *_blk) : blk(_blk) {}
- virtual ~CacheBlkPrintWrapper() {}
- void print(std::ostream &o, int verbosity = 0,
- const std::string &prefix = "") const;
-};
-
-
-
-#endif //__CACHE_BLK_HH__
+++ /dev/null
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- * Nathan Binkert
- */
-
-/**
- * @file
- * Simobject instatiation of caches.
- */
-#include <vector>
-
-// Must be included first to determine which caches we want
-#include "enums/Prefetch.hh"
-#include "mem/config/cache.hh"
-#include "mem/config/prefetch.hh"
-#include "mem/cache/base_cache.hh"
-#include "mem/cache/cache.hh"
-#include "mem/bus.hh"
-#include "params/BaseCache.hh"
-
-// Tag Templates
-#if defined(USE_CACHE_LRU)
-#include "mem/cache/tags/lru.hh"
-#endif
-
-#if defined(USE_CACHE_FALRU)
-#include "mem/cache/tags/fa_lru.hh"
-#endif
-
-#if defined(USE_CACHE_IIC)
-#include "mem/cache/tags/iic.hh"
-#endif
-
-#if defined(USE_CACHE_SPLIT)
-#include "mem/cache/tags/split.hh"
-#endif
-
-#if defined(USE_CACHE_SPLIT_LIFO)
-#include "mem/cache/tags/split_lifo.hh"
-#endif
-
-//Prefetcher Headers
-#if defined(USE_GHB)
-#include "mem/cache/prefetch/ghb_prefetcher.hh"
-#endif
-#if defined(USE_TAGGED)
-#include "mem/cache/prefetch/tagged_prefetcher.hh"
-#endif
-#if defined(USE_STRIDED)
-#include "mem/cache/prefetch/stride_prefetcher.hh"
-#endif
-
-
-using namespace std;
-using namespace TheISA;
-
-#define BUILD_CACHE(TAGS, tags) \
- do { \
- BasePrefetcher *pf; \
- if (prefetch_policy == Enums::tagged) { \
- BUILD_TAGGED_PREFETCHER(TAGS); \
- } \
- else if (prefetch_policy == Enums::stride) { \
- BUILD_STRIDED_PREFETCHER(TAGS); \
- } \
- else if (prefetch_policy == Enums::ghb) { \
- BUILD_GHB_PREFETCHER(TAGS); \
- } \
- else { \
- BUILD_NULL_PREFETCHER(TAGS); \
- } \
- Cache<TAGS> *retval = \
- new Cache<TAGS>(this, tags, pf); \
- return retval; \
- } while (0)
-
-#define BUILD_CACHE_PANIC(x) do { \
- panic("%s not compiled into M5", x); \
- } while (0)
-
-#if defined(USE_CACHE_FALRU)
-#define BUILD_FALRU_CACHE do { \
- FALRU *tags = new FALRU(block_size, size, latency); \
- BUILD_CACHE(FALRU, tags); \
- } while (0)
-#else
-#define BUILD_FALRU_CACHE BUILD_CACHE_PANIC("falru cache")
-#endif
-
-#if defined(USE_CACHE_LRU)
-#define BUILD_LRU_CACHE do { \
- LRU *tags = new LRU(numSets, block_size, assoc, latency); \
- BUILD_CACHE(LRU, tags); \
- } while (0)
-#else
-#define BUILD_LRU_CACHE BUILD_CACHE_PANIC("lru cache")
-#endif
-
-#if defined(USE_CACHE_SPLIT)
-#define BUILD_SPLIT_CACHE do { \
- Split *tags = new Split(numSets, block_size, assoc, split_size, lifo, \
- two_queue, latency); \
- BUILD_CACHE(Split, tags); \
- } while (0)
-#else
-#define BUILD_SPLIT_CACHE BUILD_CACHE_PANIC("split cache")
-#endif
-
-#if defined(USE_CACHE_SPLIT_LIFO)
-#define BUILD_SPLIT_LIFO_CACHE do { \
- SplitLIFO *tags = new SplitLIFO(block_size, size, assoc, \
- latency, two_queue, -1); \
- BUILD_CACHE(SplitLIFO, tags); \
- } while (0)
-#else
-#define BUILD_SPLIT_LIFO_CACHE BUILD_CACHE_PANIC("lifo cache")
-#endif
-
-#if defined(USE_CACHE_IIC)
-#define BUILD_IIC_CACHE do { \
- IIC *tags = new IIC(iic_params); \
- BUILD_CACHE(IIC, tags); \
- } while (0)
-#else
-#define BUILD_IIC_CACHE BUILD_CACHE_PANIC("iic")
-#endif
-
-#define BUILD_CACHES do { \
- if (repl == NULL) { \
- if (numSets == 1) { \
- BUILD_FALRU_CACHE; \
- } else { \
- if (split == true) { \
- BUILD_SPLIT_CACHE; \
- } else if (lifo == true) { \
- BUILD_SPLIT_LIFO_CACHE; \
- } else { \
- BUILD_LRU_CACHE; \
- } \
- } \
- } else { \
- BUILD_IIC_CACHE; \
- } \
- } while (0)
-
-#define BUILD_COHERENCE(b) do { \
- } while (0)
-
-#if defined(USE_TAGGED)
-#define BUILD_TAGGED_PREFETCHER(t) \
- pf = new TaggedPrefetcher(this)
-#else
-#define BUILD_TAGGED_PREFETCHER(t) BUILD_CACHE_PANIC("Tagged Prefetcher")
-#endif
-
-#if defined(USE_STRIDED)
-#define BUILD_STRIDED_PREFETCHER(t) \
- pf = new StridePrefetcher(this)
-#else
-#define BUILD_STRIDED_PREFETCHER(t) BUILD_CACHE_PANIC("Stride Prefetcher")
-#endif
-
-#if defined(USE_GHB)
-#define BUILD_GHB_PREFETCHER(t) \
- pf = new GHBPrefetcher(this)
-#else
-#define BUILD_GHB_PREFETCHER(t) BUILD_CACHE_PANIC("GHB Prefetcher")
-#endif
-
-#if defined(USE_TAGGED)
-#define BUILD_NULL_PREFETCHER(t) \
- pf = new TaggedPrefetcher(this)
-#else
-#define BUILD_NULL_PREFETCHER(t) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)")
-#endif
-
-BaseCache *
-BaseCacheParams::create()
-{
- int numSets = size / (assoc * block_size);
- if (subblock_size == 0) {
- subblock_size = block_size;
- }
-
- //Warnings about prefetcher policy
- if (prefetch_policy == Enums::none) {
- if (prefetch_miss || prefetch_access)
- panic("With no prefetcher, you shouldn't prefetch from"
- " either miss or access stream\n");
- }
-
- if (prefetch_policy == Enums::tagged || prefetch_policy == Enums::stride ||
- prefetch_policy == Enums::ghb) {
-
- if (!prefetch_miss && !prefetch_access)
- warn("With this prefetcher you should chose a prefetch"
- " stream (miss or access)\nNo Prefetching will occur\n");
-
- if (prefetch_miss && prefetch_access)
- panic("Can't do prefetches from both miss and access stream");
- }
-
-#if defined(USE_CACHE_IIC)
- // Build IIC params
- IIC::Params iic_params;
- iic_params.size = size;
- iic_params.numSets = numSets;
- iic_params.blkSize = block_size;
- iic_params.assoc = assoc;
- iic_params.hashDelay = hash_delay;
- iic_params.hitLatency = latency;
- iic_params.rp = repl;
- iic_params.subblockSize = subblock_size;
-#else
- const void *repl = NULL;
-#endif
-
- BUILD_CACHES;
- return NULL;
-}
+++ /dev/null
-# -*- mode:python -*-
-
-# Copyright (c) 2006 The Regents of The University of Michigan
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met: redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer;
-# redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution;
-# neither the name of the copyright holders nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Authors: Nathan Binkert
-
-Import('*')
-
-Source('mshr.cc')
-Source('mshr_queue.cc')
+++ /dev/null
-/*
- * Copyright (c) 2002-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- * Dave Greene
- */
-
-/**
- * @file
- * Miss Status and Handling Register (MSHR) definitions.
- */
-
-#include <assert.h>
-#include <string>
-#include <vector>
-#include <algorithm>
-
-#include "mem/cache/miss/mshr.hh"
-#include "sim/core.hh" // for curTick
-#include "sim/host.hh"
-#include "base/misc.hh"
-#include "mem/cache/cache.hh"
-
-using namespace std;
-
-MSHR::MSHR()
-{
- inService = false;
- ntargets = 0;
- threadNum = -1;
- targets = new TargetList();
- deferredTargets = new TargetList();
-}
-
-
-MSHR::TargetList::TargetList()
- : needsExclusive(false), hasUpgrade(false)
-{}
-
-
-inline void
-MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
- Counter order, bool cpuSide, bool markPending)
-{
- if (cpuSide) {
- if (pkt->needsExclusive()) {
- needsExclusive = true;
- }
-
- if (pkt->cmd == MemCmd::UpgradeReq) {
- hasUpgrade = true;
- }
- }
-
- if (markPending) {
- MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
- if (mshr != NULL) {
- assert(!mshr->downstreamPending);
- mshr->downstreamPending = true;
- }
- }
-
- push_back(Target(pkt, readyTime, order, cpuSide, markPending));
-}
-
-
-void
-MSHR::TargetList::replaceUpgrades()
-{
- if (!hasUpgrade)
- return;
-
- Iterator end_i = end();
- for (Iterator i = begin(); i != end_i; ++i) {
- if (i->pkt->cmd == MemCmd::UpgradeReq) {
- i->pkt->cmd = MemCmd::ReadExReq;
- DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
- }
- }
-
- hasUpgrade = false;
-}
-
-
-void
-MSHR::TargetList::clearDownstreamPending()
-{
- Iterator end_i = end();
- for (Iterator i = begin(); i != end_i; ++i) {
- if (i->markedPending) {
- MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
- if (mshr != NULL) {
- mshr->clearDownstreamPending();
- }
- }
- }
-}
-
-
-bool
-MSHR::TargetList::checkFunctional(PacketPtr pkt)
-{
- Iterator end_i = end();
- for (Iterator i = begin(); i != end_i; ++i) {
- if (pkt->checkFunctional(i->pkt)) {
- return true;
- }
- }
-
- return false;
-}
-
-
-void
-MSHR::TargetList::
-print(std::ostream &os, int verbosity, const std::string &prefix) const
-{
- ConstIterator end_i = end();
- for (ConstIterator i = begin(); i != end_i; ++i) {
- ccprintf(os, "%s%s: ", prefix, i->isCpuSide() ? "cpu" : "mem");
- i->pkt->print(os, verbosity, "");
- }
-}
-
-
-void
-MSHR::allocate(Addr _addr, int _size, PacketPtr target,
- Tick whenReady, Counter _order)
-{
- addr = _addr;
- size = _size;
- readyTime = whenReady;
- order = _order;
- assert(target);
- isCacheFill = false;
- _isUncacheable = target->req->isUncacheable();
- inService = false;
- downstreamPending = false;
- threadNum = 0;
- ntargets = 1;
- // Don't know of a case where we would allocate a new MSHR for a
- // snoop (mem-side request), so set cpuSide to true here.
- assert(targets->isReset());
- targets->add(target, whenReady, _order, true, true);
- assert(deferredTargets->isReset());
- pendingInvalidate = false;
- pendingShared = false;
- data = NULL;
-}
-
-
-void
-MSHR::clearDownstreamPending()
-{
- assert(downstreamPending);
- downstreamPending = false;
- // recursively clear flag on any MSHRs we will be forwarding
- // responses to
- targets->clearDownstreamPending();
-}
-
-bool
-MSHR::markInService()
-{
- assert(!inService);
- if (isSimpleForward()) {
- // we just forwarded the request packet & don't expect a
- // response, so get rid of it
- assert(getNumTargets() == 1);
- popTarget();
- return true;
- }
- inService = true;
- if (!downstreamPending) {
- // let upstream caches know that the request has made it to a
- // level where it's going to get a response
- targets->clearDownstreamPending();
- }
- return false;
-}
-
-
-void
-MSHR::deallocate()
-{
- assert(targets->empty());
- targets->resetFlags();
- assert(deferredTargets->isReset());
- assert(ntargets == 0);
- inService = false;
- //allocIter = NULL;
- //readyIter = NULL;
-}
-
-/*
- * Adds a target to an MSHR
- */
-void
-MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
-{
- // if there's a request already in service for this MSHR, we will
- // have to defer the new target until after the response if any of
- // the following are true:
- // - there are other targets already deferred
- // - there's a pending invalidate to be applied after the response
- // comes back (but before this target is processed)
- // - the outstanding request is for a non-exclusive block and this
- // target requires an exclusive block
- if (inService &&
- (!deferredTargets->empty() || pendingInvalidate ||
- (!targets->needsExclusive && pkt->needsExclusive()))) {
- // need to put on deferred list
- deferredTargets->add(pkt, whenReady, _order, true, true);
- } else {
- // No request outstanding, or still OK to append to
- // outstanding request: append to regular target list. Only
- // mark pending if current request hasn't been issued yet
- // (isn't in service).
- targets->add(pkt, whenReady, _order, true, !inService);
- }
-
- ++ntargets;
-}
-
-bool
-MSHR::handleSnoop(PacketPtr pkt, Counter _order)
-{
- if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
- // Request has not been issued yet, or it's been issued
- // locally but is buffered unissued at some downstream cache
- // which is forwarding us this snoop. Either way, the packet
- // we're snooping logically precedes this MSHR's request, so
- // the snoop has no impact on the MSHR, but must be processed
- // in the standard way by the cache. The only exception is
- // that if we're an L2+ cache buffering an UpgradeReq from a
- // higher-level cache, and the snoop is invalidating, then our
- // buffered upgrades must be converted to read exclusives,
- // since the upper-level cache no longer has a valid copy.
- // That is, even though the upper-level cache got out on its
- // local bus first, some other invalidating transaction
- // reached the global bus before the upgrade did.
- if (pkt->needsExclusive()) {
- targets->replaceUpgrades();
- deferredTargets->replaceUpgrades();
- }
-
- return false;
- }
-
- // From here on down, the request issued by this MSHR logically
- // precedes the request we're snooping.
-
- if (pkt->needsExclusive()) {
- // snooped request still precedes the re-request we'll have to
- // issue for deferred targets, if any...
- deferredTargets->replaceUpgrades();
- }
-
- if (pendingInvalidate) {
- // a prior snoop has already appended an invalidation, so
- // logically we don't have the block anymore; no need for
- // further snooping.
- return true;
- }
-
- if (targets->needsExclusive || pkt->needsExclusive()) {
- // actual target device (typ. PhysicalMemory) will delete the
- // packet on reception, so we need to save a copy here
- PacketPtr cp_pkt = new Packet(pkt, true);
- targets->add(cp_pkt, curTick, _order, false,
- downstreamPending && targets->needsExclusive);
- ++ntargets;
-
- if (targets->needsExclusive) {
- // We're awaiting an exclusive copy, so ownership is pending.
- // It's up to us to respond once the data arrives.
- pkt->assertMemInhibit();
- pkt->setSupplyExclusive();
- } else {
- // Someone else may respond before we get around to
- // processing this snoop, which means the copied request
- // pointer will no longer be valid
- cp_pkt->req = NULL;
- }
-
- if (pkt->needsExclusive()) {
- // This transaction will take away our pending copy
- pendingInvalidate = true;
- }
- } else {
- // Read to a read: no conflict, so no need to record as
- // target, but make sure neither reader thinks he's getting an
- // exclusive copy
- pendingShared = true;
- pkt->assertShared();
- }
-
- return true;
-}
-
-
-bool
-MSHR::promoteDeferredTargets()
-{
- assert(targets->empty());
- if (deferredTargets->empty()) {
- return false;
- }
-
- // swap targets & deferredTargets lists
- TargetList *tmp = targets;
- targets = deferredTargets;
- deferredTargets = tmp;
-
- assert(targets->size() == ntargets);
-
- // clear deferredTargets flags
- deferredTargets->resetFlags();
-
- pendingInvalidate = false;
- pendingShared = false;
- order = targets->front().order;
- readyTime = std::max(curTick, targets->front().readyTime);
-
- return true;
-}
-
-
-void
-MSHR::handleFill(Packet *pkt, CacheBlk *blk)
-{
- if (pendingShared) {
- // we snooped another read while this read was in
- // service... assert shared line on its behalf
- pkt->assertShared();
- }
-
- if (!pkt->sharedAsserted() && !pendingInvalidate
- && deferredTargets->needsExclusive) {
- // We got an exclusive response, but we have deferred targets
- // which are waiting to request an exclusive copy (not because
- // of a pending invalidate). This can happen if the original
- // request was for a read-only (non-exclusive) block, but we
- // got an exclusive copy anyway because of the E part of the
- // MOESI/MESI protocol. Since we got the exclusive copy
- // there's no need to defer the targets, so move them up to
- // the regular target list.
- assert(!targets->needsExclusive);
- targets->needsExclusive = true;
- // if any of the deferred targets were upper-level cache
- // requests marked downstreamPending, need to clear that
- assert(!downstreamPending); // not pending here anymore
- deferredTargets->clearDownstreamPending();
- // this clears out deferredTargets too
- targets->splice(targets->end(), *deferredTargets);
- deferredTargets->resetFlags();
- }
-}
-
-
-bool
-MSHR::checkFunctional(PacketPtr pkt)
-{
- // For printing, we treat the MSHR as a whole as single entity.
- // For other requests, we iterate over the individual targets
- // since that's where the actual data lies.
- if (pkt->isPrint()) {
- pkt->checkFunctional(this, addr, size, NULL);
- return false;
- } else {
- return (targets->checkFunctional(pkt) ||
- deferredTargets->checkFunctional(pkt));
- }
-}
-
-
-void
-MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
-{
- ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
- prefix, addr, addr+size-1,
- isCacheFill ? "Fill" : "",
- needsExclusive() ? "Excl" : "",
- _isUncacheable ? "Unc" : "",
- inService ? "InSvc" : "",
- downstreamPending ? "DwnPend" : "",
- pendingInvalidate ? "PendInv" : "",
- pendingShared ? "PendShared" : "");
-
- ccprintf(os, "%s Targets:\n", prefix);
- targets->print(os, verbosity, prefix + " ");
- if (!deferredTargets->empty()) {
- ccprintf(os, "%s Deferred Targets:\n", prefix);
- deferredTargets->print(os, verbosity, prefix + " ");
- }
-}
-
-MSHR::~MSHR()
-{
-}
+++ /dev/null
-/*
- * Copyright (c) 2002-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- */
-
-/**
- * @file
- * Miss Status and Handling Register (MSHR) declaration.
- */
-
-#ifndef __MSHR_HH__
-#define __MSHR_HH__
-
-#include <list>
-
-#include "base/printable.hh"
-#include "mem/packet.hh"
-
-class CacheBlk;
-class MSHRQueue;
-
-/**
- * Miss Status and handling Register. This class keeps all the information
- * needed to handle a cache miss including a list of target requests.
- */
-class MSHR : public Packet::SenderState, public Printable
-{
-
- public:
-
- class Target {
- public:
- Tick recvTime; //!< Time when request was received (for stats)
- Tick readyTime; //!< Time when request is ready to be serviced
- Counter order; //!< Global order (for memory consistency mgmt)
- PacketPtr pkt; //!< Pending request packet.
- bool cpuSide; //!< Did request come from cpu side or mem side?
- bool markedPending; //!< Did we mark upstream MSHR
- //!< as downstreamPending?
-
- bool isCpuSide() const { return cpuSide; }
-
- Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
- bool _cpuSide, bool _markedPending)
- : recvTime(curTick), readyTime(_readyTime), order(_order),
- pkt(_pkt), cpuSide(_cpuSide), markedPending(_markedPending)
- {}
- };
-
- class TargetList : public std::list<Target> {
- /** Target list iterator. */
- typedef std::list<Target>::iterator Iterator;
- typedef std::list<Target>::const_iterator ConstIterator;
-
- public:
- bool needsExclusive;
- bool hasUpgrade;
-
- TargetList();
- void resetFlags() { needsExclusive = hasUpgrade = false; }
- bool isReset() { return !needsExclusive && !hasUpgrade; }
- void add(PacketPtr pkt, Tick readyTime, Counter order,
- bool cpuSide, bool markPending);
- void replaceUpgrades();
- void clearDownstreamPending();
- bool checkFunctional(PacketPtr pkt);
- void print(std::ostream &os, int verbosity,
- const std::string &prefix) const;
- };
-
- /** A list of MSHRs. */
- typedef std::list<MSHR *> List;
- /** MSHR list iterator. */
- typedef List::iterator Iterator;
- /** MSHR list const_iterator. */
- typedef List::const_iterator ConstIterator;
-
- /** Pointer to queue containing this MSHR. */
- MSHRQueue *queue;
-
- /** Cycle when ready to issue */
- Tick readyTime;
-
- /** Order number assigned by the miss queue. */
- Counter order;
-
- /** Address of the request. */
- Addr addr;
-
- /** Size of the request. */
- int size;
-
- /** True if the request has been sent to the bus. */
- bool inService;
-
- /** True if we will be putting the returned block in the cache */
- bool isCacheFill;
-
- /** True if we need to get an exclusive copy of the block. */
- bool needsExclusive() const { return targets->needsExclusive; }
-
- /** True if the request is uncacheable */
- bool _isUncacheable;
-
- bool downstreamPending;
-
- bool pendingInvalidate;
- bool pendingShared;
-
- /** Thread number of the miss. */
- short threadNum;
- /** The number of currently allocated targets. */
- short ntargets;
-
-
- /** Data buffer (if needed). Currently used only for pending
- * upgrade handling. */
- uint8_t *data;
-
- /**
- * Pointer to this MSHR on the ready list.
- * @sa MissQueue, MSHRQueue::readyList
- */
- Iterator readyIter;
-
- /**
- * Pointer to this MSHR on the allocated list.
- * @sa MissQueue, MSHRQueue::allocatedList
- */
- Iterator allocIter;
-
-private:
- /** List of all requests that match the address */
- TargetList *targets;
-
- TargetList *deferredTargets;
-
-public:
-
- bool isUncacheable() { return _isUncacheable; }
-
- /**
- * Allocate a miss to this MSHR.
- * @param cmd The requesting command.
- * @param addr The address of the miss.
- * @param asid The address space id of the miss.
- * @param size The number of bytes to request.
- * @param pkt The original miss.
- */
- void allocate(Addr addr, int size, PacketPtr pkt,
- Tick when, Counter _order);
-
- bool markInService();
-
- void clearDownstreamPending();
-
- /**
- * Mark this MSHR as free.
- */
- void deallocate();
-
- /**
- * Add a request to the list of targets.
- * @param target The target.
- */
- void allocateTarget(PacketPtr target, Tick when, Counter order);
- bool handleSnoop(PacketPtr target, Counter order);
-
- /** A simple constructor. */
- MSHR();
- /** A simple destructor. */
- ~MSHR();
-
- /**
- * Returns the current number of allocated targets.
- * @return The current number of allocated targets.
- */
- int getNumTargets() { return ntargets; }
-
- /**
- * Returns a pointer to the target list.
- * @return a pointer to the target list.
- */
- TargetList *getTargetList() { return targets; }
-
- /**
- * Returns true if there are targets left.
- * @return true if there are targets
- */
- bool hasTargets() { return !targets->empty(); }
-
- /**
- * Returns a reference to the first target.
- * @return A pointer to the first target.
- */
- Target *getTarget() { assert(hasTargets()); return &targets->front(); }
-
- /**
- * Pop first target.
- */
- void popTarget()
- {
- --ntargets;
- targets->pop_front();
- }
-
- bool isSimpleForward()
- {
- if (getNumTargets() != 1)
- return false;
- Target *tgt = getTarget();
- return tgt->isCpuSide() && !tgt->pkt->needsResponse();
- }
-
- bool promoteDeferredTargets();
-
- void handleFill(Packet *pkt, CacheBlk *blk);
-
- bool checkFunctional(PacketPtr pkt);
-
- /**
- * Prints the contents of this MSHR for debugging.
- */
- void print(std::ostream &os,
- int verbosity = 0,
- const std::string &prefix = "") const;
-};
-
-#endif //__MSHR_HH__
+++ /dev/null
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- */
-
-/** @file
- * Definition of MSHRQueue class functions.
- */
-
-#include "mem/cache/miss/mshr_queue.hh"
-
-using namespace std;
-
-MSHRQueue::MSHRQueue(const std::string &_label,
- int num_entries, int reserve, int _index)
- : label(_label),
- numEntries(num_entries + reserve - 1), numReserve(reserve),
- index(_index)
-{
- allocated = 0;
- inServiceEntries = 0;
- registers = new MSHR[numEntries];
- for (int i = 0; i < numEntries; ++i) {
- registers[i].queue = this;
- freeList.push_back(&registers[i]);
- }
-}
-
-MSHRQueue::~MSHRQueue()
-{
- delete [] registers;
-}
-
-MSHR *
-MSHRQueue::findMatch(Addr addr) const
-{
- MSHR::ConstIterator i = allocatedList.begin();
- MSHR::ConstIterator end = allocatedList.end();
- for (; i != end; ++i) {
- MSHR *mshr = *i;
- if (mshr->addr == addr) {
- return mshr;
- }
- }
- return NULL;
-}
-
-bool
-MSHRQueue::findMatches(Addr addr, vector<MSHR*>& matches) const
-{
- // Need an empty vector
- assert(matches.empty());
- bool retval = false;
- MSHR::ConstIterator i = allocatedList.begin();
- MSHR::ConstIterator end = allocatedList.end();
- for (; i != end; ++i) {
- MSHR *mshr = *i;
- if (mshr->addr == addr) {
- retval = true;
- matches.push_back(mshr);
- }
- }
- return retval;
-}
-
-
-bool
-MSHRQueue::checkFunctional(PacketPtr pkt, Addr blk_addr)
-{
- pkt->pushLabel(label);
- MSHR::ConstIterator i = allocatedList.begin();
- MSHR::ConstIterator end = allocatedList.end();
- for (; i != end; ++i) {
- MSHR *mshr = *i;
- if (mshr->addr == blk_addr && mshr->checkFunctional(pkt)) {
- pkt->popLabel();
- return true;
- }
- }
- pkt->popLabel();
- return false;
-}
-
-
-MSHR *
-MSHRQueue::findPending(Addr addr, int size) const
-{
- MSHR::ConstIterator i = readyList.begin();
- MSHR::ConstIterator end = readyList.end();
- for (; i != end; ++i) {
- MSHR *mshr = *i;
- if (mshr->addr < addr) {
- if (mshr->addr + mshr->size > addr) {
- return mshr;
- }
- } else {
- if (addr + size > mshr->addr) {
- return mshr;
- }
- }
- }
- return NULL;
-}
-
-
-MSHR::Iterator
-MSHRQueue::addToReadyList(MSHR *mshr)
-{
- if (readyList.empty() || readyList.back()->readyTime <= mshr->readyTime) {
- return readyList.insert(readyList.end(), mshr);
- }
-
- MSHR::Iterator i = readyList.begin();
- MSHR::Iterator end = readyList.end();
- for (; i != end; ++i) {
- if ((*i)->readyTime > mshr->readyTime) {
- return readyList.insert(i, mshr);
- }
- }
- assert(false);
- return end; // keep stupid compilers happy
-}
-
-
-MSHR *
-MSHRQueue::allocate(Addr addr, int size, PacketPtr &pkt,
- Tick when, Counter order)
-{
- assert(!freeList.empty());
- MSHR *mshr = freeList.front();
- assert(mshr->getNumTargets() == 0);
- freeList.pop_front();
-
- mshr->allocate(addr, size, pkt, when, order);
- mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
- mshr->readyIter = addToReadyList(mshr);
-
- allocated += 1;
- return mshr;
-}
-
-
-void
-MSHRQueue::deallocate(MSHR *mshr)
-{
- deallocateOne(mshr);
-}
-
-MSHR::Iterator
-MSHRQueue::deallocateOne(MSHR *mshr)
-{
- MSHR::Iterator retval = allocatedList.erase(mshr->allocIter);
- freeList.push_front(mshr);
- allocated--;
- if (mshr->inService) {
- inServiceEntries--;
- } else {
- readyList.erase(mshr->readyIter);
- }
- mshr->deallocate();
- return retval;
-}
-
-void
-MSHRQueue::moveToFront(MSHR *mshr)
-{
- if (!mshr->inService) {
- assert(mshr == *(mshr->readyIter));
- readyList.erase(mshr->readyIter);
- mshr->readyIter = readyList.insert(readyList.begin(), mshr);
- }
-}
-
-void
-MSHRQueue::markInService(MSHR *mshr)
-{
- if (mshr->markInService()) {
- deallocate(mshr);
- } else {
- readyList.erase(mshr->readyIter);
- inServiceEntries += 1;
- }
-}
-
-void
-MSHRQueue::markPending(MSHR *mshr)
-{
- assert(mshr->inService);
- mshr->inService = false;
- --inServiceEntries;
- /**
- * @ todo might want to add rerequests to front of pending list for
- * performance.
- */
- mshr->readyIter = addToReadyList(mshr);
-}
-
-void
-MSHRQueue::squash(int threadNum)
-{
- MSHR::Iterator i = allocatedList.begin();
- MSHR::Iterator end = allocatedList.end();
- for (; i != end;) {
- MSHR *mshr = *i;
- if (mshr->threadNum == threadNum) {
- while (mshr->hasTargets()) {
- mshr->popTarget();
- assert(0/*target->req->getThreadNum()*/ == threadNum);
- }
- assert(!mshr->hasTargets());
- assert(mshr->ntargets==0);
- if (!mshr->inService) {
- i = deallocateOne(mshr);
- } else {
- //mshr->pkt->flags &= ~CACHE_LINE_FILL;
- ++i;
- }
- } else {
- ++i;
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- */
-
-/** @file
- * Declaration of a structure to manage MSHRs.
- */
-
-#ifndef __MEM__CACHE__MISS__MSHR_QUEUE_HH__
-#define __MEM__CACHE__MISS__MSHR_QUEUE_HH__
-
-#include <vector>
-
-#include "mem/packet.hh"
-#include "mem/cache/miss/mshr.hh"
-
-/**
- * A Class for maintaining a list of pending and allocated memory requests.
- */
-class MSHRQueue
-{
- private:
- /** Local label (for functional print requests) */
- const std::string label;
-
- /** MSHR storage. */
- MSHR *registers;
- /** Holds pointers to all allocated entries. */
- MSHR::List allocatedList;
- /** Holds pointers to entries that haven't been sent to the bus. */
- MSHR::List readyList;
- /** Holds non allocated entries. */
- MSHR::List freeList;
-
- // Parameters
- /**
- * The total number of entries in this queue. This number is set as the
- * number of entries requested plus (numReserve - 1). This allows for
- * the same number of effective entries while still maintaining the reserve.
- */
- const int numEntries;
-
- /**
- * The number of entries to hold in reserve. This is needed because copy
- * operations can allocate upto 4 entries at one time.
- */
- const int numReserve;
-
- MSHR::Iterator addToReadyList(MSHR *mshr);
-
-
- public:
- /** The number of allocated entries. */
- int allocated;
- /** The number of entries that have been forwarded to the bus. */
- int inServiceEntries;
- /** The index of this queue within the cache (MSHR queue vs. write
- * buffer). */
- const int index;
-
- /**
- * Create a queue with a given number of entries.
- * @param num_entrys The number of entries in this queue.
- * @param reserve The minimum number of entries needed to satisfy
- * any access.
- */
- MSHRQueue(const std::string &_label, int num_entries, int reserve,
- int index);
-
- /** Destructor */
- ~MSHRQueue();
-
- /**
- * Find the first MSHR that matches the provided address.
- * @param addr The address to find.
- * @return Pointer to the matching MSHR, null if not found.
- */
- MSHR *findMatch(Addr addr) const;
-
- /**
- * Find and return all the matching entries in the provided vector.
- * @param addr The address to find.
- * @param matches The vector to return pointers to the matching entries.
- * @return True if any matches are found, false otherwise.
- * @todo Typedef the vector??
- */
- bool findMatches(Addr addr, std::vector<MSHR*>& matches) const;
-
- /**
- * Find any pending requests that overlap the given request.
- * @param pkt The request to find.
- * @return A pointer to the earliest matching MSHR.
- */
- MSHR *findPending(Addr addr, int size) const;
-
- bool checkFunctional(PacketPtr pkt, Addr blk_addr);
-
- /**
- * Allocates a new MSHR for the request and size. This places the request
- * as the first target in the MSHR.
- * @param pkt The request to handle.
- * @param size The number in bytes to fetch from memory.
- * @return The a pointer to the MSHR allocated.
- *
- * @pre There are free entries.
- */
- MSHR *allocate(Addr addr, int size, PacketPtr &pkt,
- Tick when, Counter order);
-
- /**
- * Removes the given MSHR from the queue. This places the MSHR on the
- * free list.
- * @param mshr
- */
- void deallocate(MSHR *mshr);
-
- /**
- * Remove a MSHR from the queue. Returns an iterator into the
- * allocatedList for faster squash implementation.
- * @param mshr The MSHR to remove.
- * @return An iterator to the next entry in the allocatedList.
- */
- MSHR::Iterator deallocateOne(MSHR *mshr);
-
- /**
- * Moves the MSHR to the front of the pending list if it is not
- * in service.
- * @param mshr The entry to move.
- */
- void moveToFront(MSHR *mshr);
-
- /**
- * Mark the given MSHR as in service. This removes the MSHR from the
- * readyList. Deallocates the MSHR if it does not expect a response.
- * @param mshr The MSHR to mark in service.
- */
- void markInService(MSHR *mshr);
-
- /**
- * Mark an in service entry as pending, used to resend a request.
- * @param mshr The MSHR to resend.
- */
- void markPending(MSHR *mshr);
-
- /**
- * Squash outstanding requests with the given thread number. If a request
- * is in service, just squashes the targets.
- * @param threadNum The thread to squash.
- */
- void squash(int threadNum);
-
- /**
- * Returns true if the pending list is not empty.
- * @return True if there are outstanding requests.
- */
- bool havePending() const
- {
- return !readyList.empty();
- }
-
- /**
- * Returns true if there are no free entries.
- * @return True if this queue is full.
- */
- bool isFull() const
- {
- return (allocated > numEntries - numReserve);
- }
-
- /**
- * Returns the MSHR at the head of the readyList.
- * @return The next request to service.
- */
- MSHR *getNextMSHR() const
- {
- if (readyList.empty() || readyList.front()->readyTime > curTick) {
- return NULL;
- }
- return readyList.front();
- }
-
- Tick nextMSHRReadyTime() const
- {
- return readyList.empty() ? MaxTick : readyList.front()->readyTime;
- }
-};
-
-#endif //__MEM__CACHE__MISS__MSHR_QUEUE_HH__
--- /dev/null
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ * Dave Greene
+ */
+
+/**
+ * @file
+ * Miss Status and Handling Register (MSHR) definitions.
+ */
+
+#include <assert.h>
+#include <string>
+#include <vector>
+#include <algorithm>
+
+#include "mem/cache/miss/mshr.hh"
+#include "sim/core.hh" // for curTick
+#include "sim/host.hh"
+#include "base/misc.hh"
+#include "mem/cache/cache.hh"
+
+using namespace std;
+
+MSHR::MSHR()
+{
+ inService = false;
+ ntargets = 0;
+ threadNum = -1;
+ targets = new TargetList();
+ deferredTargets = new TargetList();
+}
+
+
+MSHR::TargetList::TargetList()
+ : needsExclusive(false), hasUpgrade(false)
+{}
+
+
+inline void
+MSHR::TargetList::add(PacketPtr pkt, Tick readyTime,
+ Counter order, bool cpuSide, bool markPending)
+{
+ if (cpuSide) {
+ if (pkt->needsExclusive()) {
+ needsExclusive = true;
+ }
+
+ if (pkt->cmd == MemCmd::UpgradeReq) {
+ hasUpgrade = true;
+ }
+ }
+
+ if (markPending) {
+ MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
+ if (mshr != NULL) {
+ assert(!mshr->downstreamPending);
+ mshr->downstreamPending = true;
+ }
+ }
+
+ push_back(Target(pkt, readyTime, order, cpuSide, markPending));
+}
+
+
+void
+MSHR::TargetList::replaceUpgrades()
+{
+ if (!hasUpgrade)
+ return;
+
+ Iterator end_i = end();
+ for (Iterator i = begin(); i != end_i; ++i) {
+ if (i->pkt->cmd == MemCmd::UpgradeReq) {
+ i->pkt->cmd = MemCmd::ReadExReq;
+ DPRINTF(Cache, "Replacing UpgradeReq with ReadExReq\n");
+ }
+ }
+
+ hasUpgrade = false;
+}
+
+
+void
+MSHR::TargetList::clearDownstreamPending()
+{
+ Iterator end_i = end();
+ for (Iterator i = begin(); i != end_i; ++i) {
+ if (i->markedPending) {
+ MSHR *mshr = dynamic_cast<MSHR*>(i->pkt->senderState);
+ if (mshr != NULL) {
+ mshr->clearDownstreamPending();
+ }
+ }
+ }
+}
+
+
+bool
+MSHR::TargetList::checkFunctional(PacketPtr pkt)
+{
+ Iterator end_i = end();
+ for (Iterator i = begin(); i != end_i; ++i) {
+ if (pkt->checkFunctional(i->pkt)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+void
+MSHR::TargetList::
+print(std::ostream &os, int verbosity, const std::string &prefix) const
+{
+ ConstIterator end_i = end();
+ for (ConstIterator i = begin(); i != end_i; ++i) {
+ ccprintf(os, "%s%s: ", prefix, i->isCpuSide() ? "cpu" : "mem");
+ i->pkt->print(os, verbosity, "");
+ }
+}
+
+
+void
+MSHR::allocate(Addr _addr, int _size, PacketPtr target,
+ Tick whenReady, Counter _order)
+{
+ addr = _addr;
+ size = _size;
+ readyTime = whenReady;
+ order = _order;
+ assert(target);
+ isCacheFill = false;
+ _isUncacheable = target->req->isUncacheable();
+ inService = false;
+ downstreamPending = false;
+ threadNum = 0;
+ ntargets = 1;
+ // Don't know of a case where we would allocate a new MSHR for a
+ // snoop (mem-side request), so set cpuSide to true here.
+ assert(targets->isReset());
+ targets->add(target, whenReady, _order, true, true);
+ assert(deferredTargets->isReset());
+ pendingInvalidate = false;
+ pendingShared = false;
+ data = NULL;
+}
+
+
+void
+MSHR::clearDownstreamPending()
+{
+ assert(downstreamPending);
+ downstreamPending = false;
+ // recursively clear flag on any MSHRs we will be forwarding
+ // responses to
+ targets->clearDownstreamPending();
+}
+
+bool
+MSHR::markInService()
+{
+ assert(!inService);
+ if (isSimpleForward()) {
+ // we just forwarded the request packet & don't expect a
+ // response, so get rid of it
+ assert(getNumTargets() == 1);
+ popTarget();
+ return true;
+ }
+ inService = true;
+ if (!downstreamPending) {
+ // let upstream caches know that the request has made it to a
+ // level where it's going to get a response
+ targets->clearDownstreamPending();
+ }
+ return false;
+}
+
+
+void
+MSHR::deallocate()
+{
+ assert(targets->empty());
+ targets->resetFlags();
+ assert(deferredTargets->isReset());
+ assert(ntargets == 0);
+ inService = false;
+ //allocIter = NULL;
+ //readyIter = NULL;
+}
+
+/*
+ * Adds a target to an MSHR
+ */
+void
+MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
+{
+ // if there's a request already in service for this MSHR, we will
+ // have to defer the new target until after the response if any of
+ // the following are true:
+ // - there are other targets already deferred
+ // - there's a pending invalidate to be applied after the response
+ // comes back (but before this target is processed)
+ // - the outstanding request is for a non-exclusive block and this
+ // target requires an exclusive block
+ if (inService &&
+ (!deferredTargets->empty() || pendingInvalidate ||
+ (!targets->needsExclusive && pkt->needsExclusive()))) {
+ // need to put on deferred list
+ deferredTargets->add(pkt, whenReady, _order, true, true);
+ } else {
+ // No request outstanding, or still OK to append to
+ // outstanding request: append to regular target list. Only
+ // mark pending if current request hasn't been issued yet
+ // (isn't in service).
+ targets->add(pkt, whenReady, _order, true, !inService);
+ }
+
+ ++ntargets;
+}
+
+bool
+MSHR::handleSnoop(PacketPtr pkt, Counter _order)
+{
+ if (!inService || (pkt->isExpressSnoop() && downstreamPending)) {
+ // Request has not been issued yet, or it's been issued
+ // locally but is buffered unissued at some downstream cache
+ // which is forwarding us this snoop. Either way, the packet
+ // we're snooping logically precedes this MSHR's request, so
+ // the snoop has no impact on the MSHR, but must be processed
+ // in the standard way by the cache. The only exception is
+ // that if we're an L2+ cache buffering an UpgradeReq from a
+ // higher-level cache, and the snoop is invalidating, then our
+ // buffered upgrades must be converted to read exclusives,
+ // since the upper-level cache no longer has a valid copy.
+ // That is, even though the upper-level cache got out on its
+ // local bus first, some other invalidating transaction
+ // reached the global bus before the upgrade did.
+ if (pkt->needsExclusive()) {
+ targets->replaceUpgrades();
+ deferredTargets->replaceUpgrades();
+ }
+
+ return false;
+ }
+
+ // From here on down, the request issued by this MSHR logically
+ // precedes the request we're snooping.
+
+ if (pkt->needsExclusive()) {
+ // snooped request still precedes the re-request we'll have to
+ // issue for deferred targets, if any...
+ deferredTargets->replaceUpgrades();
+ }
+
+ if (pendingInvalidate) {
+ // a prior snoop has already appended an invalidation, so
+ // logically we don't have the block anymore; no need for
+ // further snooping.
+ return true;
+ }
+
+ if (targets->needsExclusive || pkt->needsExclusive()) {
+ // actual target device (typ. PhysicalMemory) will delete the
+ // packet on reception, so we need to save a copy here
+ PacketPtr cp_pkt = new Packet(pkt, true);
+ targets->add(cp_pkt, curTick, _order, false,
+ downstreamPending && targets->needsExclusive);
+ ++ntargets;
+
+ if (targets->needsExclusive) {
+ // We're awaiting an exclusive copy, so ownership is pending.
+ // It's up to us to respond once the data arrives.
+ pkt->assertMemInhibit();
+ pkt->setSupplyExclusive();
+ } else {
+ // Someone else may respond before we get around to
+ // processing this snoop, which means the copied request
+ // pointer will no longer be valid
+ cp_pkt->req = NULL;
+ }
+
+ if (pkt->needsExclusive()) {
+ // This transaction will take away our pending copy
+ pendingInvalidate = true;
+ }
+ } else {
+ // Read to a read: no conflict, so no need to record as
+ // target, but make sure neither reader thinks it is getting an
+ // exclusive copy
+ pendingShared = true;
+ pkt->assertShared();
+ }
+
+ return true;
+}
+
+
+bool
+MSHR::promoteDeferredTargets()
+{
+ assert(targets->empty());
+ if (deferredTargets->empty()) {
+ return false;
+ }
+
+ // swap targets & deferredTargets lists
+ TargetList *tmp = targets;
+ targets = deferredTargets;
+ deferredTargets = tmp;
+
+ assert(targets->size() == ntargets);
+
+ // clear deferredTargets flags
+ deferredTargets->resetFlags();
+
+ pendingInvalidate = false;
+ pendingShared = false;
+ order = targets->front().order;
+ readyTime = std::max(curTick, targets->front().readyTime);
+
+ return true;
+}
+
+
+void
+MSHR::handleFill(Packet *pkt, CacheBlk *blk)
+{
+ if (pendingShared) {
+ // we snooped another read while this read was in
+ // service... assert shared line on its behalf
+ pkt->assertShared();
+ }
+
+ if (!pkt->sharedAsserted() && !pendingInvalidate
+ && deferredTargets->needsExclusive) {
+ // We got an exclusive response, but we have deferred targets
+ // which are waiting to request an exclusive copy (not because
+ // of a pending invalidate). This can happen if the original
+ // request was for a read-only (non-exclusive) block, but we
+ // got an exclusive copy anyway because of the E part of the
+ // MOESI/MESI protocol. Since we got the exclusive copy
+ // there's no need to defer the targets, so move them up to
+ // the regular target list.
+ assert(!targets->needsExclusive);
+ targets->needsExclusive = true;
+ // if any of the deferred targets were upper-level cache
+ // requests marked downstreamPending, need to clear that
+ assert(!downstreamPending); // not pending here anymore
+ deferredTargets->clearDownstreamPending();
+ // this clears out deferredTargets too
+ targets->splice(targets->end(), *deferredTargets);
+ deferredTargets->resetFlags();
+ }
+}
+
+
+bool
+MSHR::checkFunctional(PacketPtr pkt)
+{
+ // For printing, we treat the MSHR as a single entity.
+ // For other requests, we iterate over the individual targets
+ // since that's where the actual data lies.
+ if (pkt->isPrint()) {
+ pkt->checkFunctional(this, addr, size, NULL);
+ return false;
+ } else {
+ return (targets->checkFunctional(pkt) ||
+ deferredTargets->checkFunctional(pkt));
+ }
+}
+
+
+void
+MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
+{
+ ccprintf(os, "%s[%x:%x] %s %s %s state: %s %s %s %s\n",
+ prefix, addr, addr+size-1,
+ isCacheFill ? "Fill" : "",
+ needsExclusive() ? "Excl" : "",
+ _isUncacheable ? "Unc" : "",
+ inService ? "InSvc" : "",
+ downstreamPending ? "DwnPend" : "",
+ pendingInvalidate ? "PendInv" : "",
+ pendingShared ? "PendShared" : "");
+
+ ccprintf(os, "%s Targets:\n", prefix);
+ targets->print(os, verbosity, prefix + " ");
+ if (!deferredTargets->empty()) {
+ ccprintf(os, "%s Deferred Targets:\n", prefix);
+ deferredTargets->print(os, verbosity, prefix + " ");
+ }
+}
+
+MSHR::~MSHR()
+{
+}
--- /dev/null
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Miss Status and Handling Register (MSHR) declaration.
+ */
+
+#ifndef __MSHR_HH__
+#define __MSHR_HH__
+
+#include <list>
+
+#include "base/printable.hh"
+#include "mem/packet.hh"
+
+class CacheBlk;
+class MSHRQueue;
+
+/**
+ * Miss Status and Handling Register. This class keeps all the information
+ * needed to handle a cache miss including a list of target requests.
+ */
+class MSHR : public Packet::SenderState, public Printable
+{
+
+ public:
+
+ class Target {
+ public:
+ Tick recvTime; //!< Time when request was received (for stats)
+ Tick readyTime; //!< Time when request is ready to be serviced
+ Counter order; //!< Global order (for memory consistency mgmt)
+ PacketPtr pkt; //!< Pending request packet.
+ bool cpuSide; //!< Did request come from cpu side or mem side?
+ bool markedPending; //!< Did we mark upstream MSHR
+ //!< as downstreamPending?
+
+ bool isCpuSide() const { return cpuSide; }
+
+ Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
+ bool _cpuSide, bool _markedPending)
+ : recvTime(curTick), readyTime(_readyTime), order(_order),
+ pkt(_pkt), cpuSide(_cpuSide), markedPending(_markedPending)
+ {}
+ };
+
+ class TargetList : public std::list<Target> {
+ /** Target list iterator. */
+ typedef std::list<Target>::iterator Iterator;
+ typedef std::list<Target>::const_iterator ConstIterator;
+
+ public:
+ bool needsExclusive;
+ bool hasUpgrade;
+
+ TargetList();
+ void resetFlags() { needsExclusive = hasUpgrade = false; }
+ bool isReset() { return !needsExclusive && !hasUpgrade; }
+ void add(PacketPtr pkt, Tick readyTime, Counter order,
+ bool cpuSide, bool markPending);
+ void replaceUpgrades();
+ void clearDownstreamPending();
+ bool checkFunctional(PacketPtr pkt);
+ void print(std::ostream &os, int verbosity,
+ const std::string &prefix) const;
+ };
+
+ /** A list of MSHRs. */
+ typedef std::list<MSHR *> List;
+ /** MSHR list iterator. */
+ typedef List::iterator Iterator;
+ /** MSHR list const_iterator. */
+ typedef List::const_iterator ConstIterator;
+
+ /** Pointer to queue containing this MSHR. */
+ MSHRQueue *queue;
+
+ /** Cycle when ready to issue */
+ Tick readyTime;
+
+ /** Order number assigned by the miss queue. */
+ Counter order;
+
+ /** Address of the request. */
+ Addr addr;
+
+ /** Size of the request. */
+ int size;
+
+ /** True if the request has been sent to the bus. */
+ bool inService;
+
+ /** True if we will be putting the returned block in the cache */
+ bool isCacheFill;
+
+ /** True if we need to get an exclusive copy of the block. */
+ bool needsExclusive() const { return targets->needsExclusive; }
+
+ /** True if the request is uncacheable */
+ bool _isUncacheable;
+
+ bool downstreamPending;
+
+ bool pendingInvalidate;
+ bool pendingShared;
+
+ /** Thread number of the miss. */
+ short threadNum;
+ /** The number of currently allocated targets. */
+ short ntargets;
+
+
+ /** Data buffer (if needed). Currently used only for pending
+ * upgrade handling. */
+ uint8_t *data;
+
+ /**
+ * Pointer to this MSHR on the ready list.
+ * @sa MissQueue, MSHRQueue::readyList
+ */
+ Iterator readyIter;
+
+ /**
+ * Pointer to this MSHR on the allocated list.
+ * @sa MissQueue, MSHRQueue::allocatedList
+ */
+ Iterator allocIter;
+
+private:
+ /** List of all requests that match the address */
+ TargetList *targets;
+
+ TargetList *deferredTargets;
+
+public:
+
+ bool isUncacheable() { return _isUncacheable; }
+
+ /**
+ * Allocate a miss to this MSHR.
+ * @param addr The address of the miss.
+ * @param size The number of bytes to request.
+ * @param pkt The original miss packet.
+ * @param when The tick at which the request is ready to issue.
+ * @param _order Global order number assigned by the miss queue.
+ */
+ void allocate(Addr addr, int size, PacketPtr pkt,
+ Tick when, Counter _order);
+
+ bool markInService();
+
+ void clearDownstreamPending();
+
+ /**
+ * Mark this MSHR as free.
+ */
+ void deallocate();
+
+ /**
+ * Add a request to the list of targets.
+ * @param target The target.
+ */
+ void allocateTarget(PacketPtr target, Tick when, Counter order);
+ bool handleSnoop(PacketPtr target, Counter order);
+
+ /** A simple constructor. */
+ MSHR();
+ /** A simple destructor. */
+ ~MSHR();
+
+ /**
+ * Returns the current number of allocated targets.
+ * @return The current number of allocated targets.
+ */
+ int getNumTargets() { return ntargets; }
+
+ /**
+ * Returns a pointer to the target list.
+ * @return a pointer to the target list.
+ */
+ TargetList *getTargetList() { return targets; }
+
+ /**
+ * Returns true if there are targets left.
+ * @return true if there are targets
+ */
+ bool hasTargets() { return !targets->empty(); }
+
+ /**
+ * Returns a pointer to the first target.
+ * @return A pointer to the first target.
+ */
+ Target *getTarget() { assert(hasTargets()); return &targets->front(); }
+
+ /**
+ * Pop first target.
+ */
+ void popTarget()
+ {
+ --ntargets;
+ targets->pop_front();
+ }
+
+ bool isSimpleForward()
+ {
+ if (getNumTargets() != 1)
+ return false;
+ Target *tgt = getTarget();
+ return tgt->isCpuSide() && !tgt->pkt->needsResponse();
+ }
+
+ bool promoteDeferredTargets();
+
+ void handleFill(Packet *pkt, CacheBlk *blk);
+
+ bool checkFunctional(PacketPtr pkt);
+
+ /**
+ * Prints the contents of this MSHR for debugging.
+ */
+ void print(std::ostream &os,
+ int verbosity = 0,
+ const std::string &prefix = "") const;
+};
+
+#endif //__MSHR_HH__
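The response path in the owning cache is the clearest way to see how the target list above is meant to be used. The following is a hedged sketch only, not part of this patch: satisfyTarget() and reissue() are hypothetical helpers standing in for whatever the cache does to complete a request and to re-send a promoted one.

// Sketch of response handling in the owning cache (illustrative only).
void
serviceMSHRTargets(MSHR *mshr, PacketPtr response)
{
    // Complete every target accumulated while the miss was outstanding.
    while (mshr->hasTargets()) {
        MSHR::Target *target = mshr->getTarget();
        satisfyTarget(target->pkt, response);   // hypothetical helper
        mshr->popTarget();
    }

    // Deferred targets (e.g. an exclusive request queued behind a shared
    // one) are promoted and re-requested; otherwise the MSHR is retired.
    if (mshr->promoteDeferredTargets()) {
        reissue(mshr);                          // hypothetical helper
    } else {
        mshr->queue->deallocate(mshr);
    }
}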
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/** @file
+ * Definition of MSHRQueue class functions.
+ */
+
+#include "mem/cache/miss/mshr_queue.hh"
+
+using namespace std;
+
+MSHRQueue::MSHRQueue(const std::string &_label,
+ int num_entries, int reserve, int _index)
+ : label(_label),
+ numEntries(num_entries + reserve - 1), numReserve(reserve),
+ index(_index)
+{
+ allocated = 0;
+ inServiceEntries = 0;
+ registers = new MSHR[numEntries];
+ for (int i = 0; i < numEntries; ++i) {
+ registers[i].queue = this;
+ freeList.push_back(&registers[i]);
+ }
+}
+
+MSHRQueue::~MSHRQueue()
+{
+ delete [] registers;
+}
+
+MSHR *
+MSHRQueue::findMatch(Addr addr) const
+{
+ MSHR::ConstIterator i = allocatedList.begin();
+ MSHR::ConstIterator end = allocatedList.end();
+ for (; i != end; ++i) {
+ MSHR *mshr = *i;
+ if (mshr->addr == addr) {
+ return mshr;
+ }
+ }
+ return NULL;
+}
+
+bool
+MSHRQueue::findMatches(Addr addr, vector<MSHR*>& matches) const
+{
+ // Need an empty vector
+ assert(matches.empty());
+ bool retval = false;
+ MSHR::ConstIterator i = allocatedList.begin();
+ MSHR::ConstIterator end = allocatedList.end();
+ for (; i != end; ++i) {
+ MSHR *mshr = *i;
+ if (mshr->addr == addr) {
+ retval = true;
+ matches.push_back(mshr);
+ }
+ }
+ return retval;
+}
+
+
+bool
+MSHRQueue::checkFunctional(PacketPtr pkt, Addr blk_addr)
+{
+ pkt->pushLabel(label);
+ MSHR::ConstIterator i = allocatedList.begin();
+ MSHR::ConstIterator end = allocatedList.end();
+ for (; i != end; ++i) {
+ MSHR *mshr = *i;
+ if (mshr->addr == blk_addr && mshr->checkFunctional(pkt)) {
+ pkt->popLabel();
+ return true;
+ }
+ }
+ pkt->popLabel();
+ return false;
+}
+
+
+MSHR *
+MSHRQueue::findPending(Addr addr, int size) const
+{
+ MSHR::ConstIterator i = readyList.begin();
+ MSHR::ConstIterator end = readyList.end();
+ for (; i != end; ++i) {
+ MSHR *mshr = *i;
+ if (mshr->addr < addr) {
+ if (mshr->addr + mshr->size > addr) {
+ return mshr;
+ }
+ } else {
+ if (addr + size > mshr->addr) {
+ return mshr;
+ }
+ }
+ }
+ return NULL;
+}
+
+
+MSHR::Iterator
+MSHRQueue::addToReadyList(MSHR *mshr)
+{
+ if (readyList.empty() || readyList.back()->readyTime <= mshr->readyTime) {
+ return readyList.insert(readyList.end(), mshr);
+ }
+
+ MSHR::Iterator i = readyList.begin();
+ MSHR::Iterator end = readyList.end();
+ for (; i != end; ++i) {
+ if ((*i)->readyTime > mshr->readyTime) {
+ return readyList.insert(i, mshr);
+ }
+ }
+ assert(false);
+ return end; // keep stupid compilers happy
+}
+
+
+MSHR *
+MSHRQueue::allocate(Addr addr, int size, PacketPtr &pkt,
+ Tick when, Counter order)
+{
+ assert(!freeList.empty());
+ MSHR *mshr = freeList.front();
+ assert(mshr->getNumTargets() == 0);
+ freeList.pop_front();
+
+ mshr->allocate(addr, size, pkt, when, order);
+ mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
+ mshr->readyIter = addToReadyList(mshr);
+
+ allocated += 1;
+ return mshr;
+}
+
+
+void
+MSHRQueue::deallocate(MSHR *mshr)
+{
+ deallocateOne(mshr);
+}
+
+MSHR::Iterator
+MSHRQueue::deallocateOne(MSHR *mshr)
+{
+ MSHR::Iterator retval = allocatedList.erase(mshr->allocIter);
+ freeList.push_front(mshr);
+ allocated--;
+ if (mshr->inService) {
+ inServiceEntries--;
+ } else {
+ readyList.erase(mshr->readyIter);
+ }
+ mshr->deallocate();
+ return retval;
+}
+
+void
+MSHRQueue::moveToFront(MSHR *mshr)
+{
+ if (!mshr->inService) {
+ assert(mshr == *(mshr->readyIter));
+ readyList.erase(mshr->readyIter);
+ mshr->readyIter = readyList.insert(readyList.begin(), mshr);
+ }
+}
+
+void
+MSHRQueue::markInService(MSHR *mshr)
+{
+ if (mshr->markInService()) {
+ deallocate(mshr);
+ } else {
+ readyList.erase(mshr->readyIter);
+ inServiceEntries += 1;
+ }
+}
+
+void
+MSHRQueue::markPending(MSHR *mshr)
+{
+ assert(mshr->inService);
+ mshr->inService = false;
+ --inServiceEntries;
+ /**
+ * @todo Might want to add re-requests to the front of the pending list
+ * for performance.
+ */
+ mshr->readyIter = addToReadyList(mshr);
+}
+
+void
+MSHRQueue::squash(int threadNum)
+{
+ MSHR::Iterator i = allocatedList.begin();
+ MSHR::Iterator end = allocatedList.end();
+ for (; i != end;) {
+ MSHR *mshr = *i;
+ if (mshr->threadNum == threadNum) {
+ while (mshr->hasTargets()) {
+ mshr->popTarget();
+ assert(0/*target->req->getThreadNum()*/ == threadNum);
+ }
+ assert(!mshr->hasTargets());
+ assert(mshr->ntargets==0);
+ if (!mshr->inService) {
+ i = deallocateOne(mshr);
+ } else {
+ //mshr->pkt->flags &= ~CACHE_LINE_FILL;
+ ++i;
+ }
+ } else {
+ ++i;
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/** @file
+ * Declaration of a structure to manage MSHRs.
+ */
+
+#ifndef __MEM__CACHE__MISS__MSHR_QUEUE_HH__
+#define __MEM__CACHE__MISS__MSHR_QUEUE_HH__
+
+#include <vector>
+
+#include "mem/packet.hh"
+#include "mem/cache/miss/mshr.hh"
+
+/**
+ * A Class for maintaining a list of pending and allocated memory requests.
+ */
+class MSHRQueue
+{
+ private:
+ /** Local label (for functional print requests) */
+ const std::string label;
+
+ /** MSHR storage. */
+ MSHR *registers;
+ /** Holds pointers to all allocated entries. */
+ MSHR::List allocatedList;
+ /** Holds pointers to entries that haven't been sent to the bus. */
+ MSHR::List readyList;
+ /** Holds non allocated entries. */
+ MSHR::List freeList;
+
+ // Parameters
+ /**
+ * The total number of entries in this queue. This number is set as the
+ * number of entries requested plus (numReserve - 1). This allows for
+ * the same number of effective entries while still maintaining the reserve.
+ */
+ const int numEntries;
+
+ /**
+ * The number of entries to hold in reserve. This is needed because copy
+ * operations can allocate up to 4 entries at one time.
+ */
+ const int numReserve;
+
+ MSHR::Iterator addToReadyList(MSHR *mshr);
+
+
+ public:
+ /** The number of allocated entries. */
+ int allocated;
+ /** The number of entries that have been forwarded to the bus. */
+ int inServiceEntries;
+ /** The index of this queue within the cache (MSHR queue vs. write
+ * buffer). */
+ const int index;
+
+ /**
+ * Create a queue with a given number of entries.
+ * @param num_entries The number of entries in this queue.
+ * @param reserve The minimum number of entries needed to satisfy
+ * any access.
+ */
+ MSHRQueue(const std::string &_label, int num_entries, int reserve,
+ int index);
+
+ /** Destructor */
+ ~MSHRQueue();
+
+ /**
+ * Find the first MSHR that matches the provided address.
+ * @param addr The address to find.
+ * @return Pointer to the matching MSHR, null if not found.
+ */
+ MSHR *findMatch(Addr addr) const;
+
+ /**
+ * Find and return all the matching entries in the provided vector.
+ * @param addr The address to find.
+ * @param matches The vector to return pointers to the matching entries.
+ * @return True if any matches are found, false otherwise.
+ * @todo Typedef the vector??
+ */
+ bool findMatches(Addr addr, std::vector<MSHR*>& matches) const;
+
+ /**
+ * Find any pending requests that overlap the given request.
+ * @param pkt The request to find.
+ * @return A pointer to the earliest matching MSHR.
+ */
+ MSHR *findPending(Addr addr, int size) const;
+
+ bool checkFunctional(PacketPtr pkt, Addr blk_addr);
+
+ /**
+ * Allocates a new MSHR for the request and size. This places the request
+ * as the first target in the MSHR.
+ * @param pkt The request to handle.
+ * @param size The number of bytes to fetch from memory.
+ * @return A pointer to the allocated MSHR.
+ *
+ * @pre There are free entries.
+ */
+ MSHR *allocate(Addr addr, int size, PacketPtr &pkt,
+ Tick when, Counter order);
+
+ /**
+ * Removes the given MSHR from the queue. This places the MSHR on the
+ * free list.
+ * @param mshr
+ */
+ void deallocate(MSHR *mshr);
+
+ /**
+ * Remove a MSHR from the queue. Returns an iterator into the
+ * allocatedList for faster squash implementation.
+ * @param mshr The MSHR to remove.
+ * @return An iterator to the next entry in the allocatedList.
+ */
+ MSHR::Iterator deallocateOne(MSHR *mshr);
+
+ /**
+ * Moves the MSHR to the front of the pending list if it is not
+ * in service.
+ * @param mshr The entry to move.
+ */
+ void moveToFront(MSHR *mshr);
+
+ /**
+ * Mark the given MSHR as in service. This removes the MSHR from the
+ * readyList. Deallocates the MSHR if it does not expect a response.
+ * @param mshr The MSHR to mark in service.
+ */
+ void markInService(MSHR *mshr);
+
+ /**
+ * Mark an in service entry as pending, used to resend a request.
+ * @param mshr The MSHR to resend.
+ */
+ void markPending(MSHR *mshr);
+
+ /**
+ * Squash outstanding requests with the given thread number. If a request
+ * is in service, just squashes the targets.
+ * @param threadNum The thread to squash.
+ */
+ void squash(int threadNum);
+
+ /**
+ * Returns true if the pending list is not empty.
+ * @return True if there are outstanding requests.
+ */
+ bool havePending() const
+ {
+ return !readyList.empty();
+ }
+
+ /**
+ * Returns true if there are no free entries.
+ * @return True if this queue is full.
+ */
+ bool isFull() const
+ {
+ return (allocated > numEntries - numReserve);
+ }
+
+ /**
+ * Returns the MSHR at the head of the readyList.
+ * @return The next request to service.
+ */
+ MSHR *getNextMSHR() const
+ {
+ if (readyList.empty() || readyList.front()->readyTime > curTick) {
+ return NULL;
+ }
+ return readyList.front();
+ }
+
+ Tick nextMSHRReadyTime() const
+ {
+ return readyList.empty() ? MaxTick : readyList.front()->readyTime;
+ }
+};
+
+#endif //__MEM__CACHE__MISS__MSHR_QUEUE_HH__
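To make the allocate / markInService life cycle concrete, here is a hedged sketch of how a cache's miss path might drive this queue. It is not part of the patch; issueToBus() is a hypothetical helper and the tick/order arguments are placeholders.

// Sketch of a miss path driving an MSHRQueue (illustrative only).
void
handleMiss(MSHRQueue &mshrQueue, Addr blk_addr, int blk_size,
           PacketPtr pkt, Tick ready_time, Counter order)
{
    MSHR *mshr = mshrQueue.findMatch(blk_addr);
    if (mshr) {
        // Coalesce with an outstanding miss to the same block.
        mshr->allocateTarget(pkt, ready_time, order);
    } else if (!mshrQueue.isFull()) {
        // Start a new miss; pkt becomes the first target.
        mshrQueue.allocate(blk_addr, blk_size, pkt, ready_time, order);
    } else {
        // No free entries: the cache has to block and retry later.
    }
}

// Later, when the memory-side port is available:
void
sendNextMiss(MSHRQueue &mshrQueue)
{
    if (MSHR *mshr = mshrQueue.getNextMSHR()) {
        issueToBus(mshr);               // hypothetical helper
        mshrQueue.markInService(mshr);  // removes it from the readyList
    }
}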
Import('*')
-Source('base_prefetcher.cc')
-Source('ghb_prefetcher.cc')
-Source('stride_prefetcher.cc')
-Source('tagged_prefetcher.cc')
+Source('base.cc')
+Source('ghb.cc')
+Source('stride.cc')
+Source('tagged.cc')
--- /dev/null
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Hardware Prefetcher Definition.
+ */
+
+#include "base/trace.hh"
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/prefetch/base_prefetcher.hh"
+#include "mem/request.hh"
+#include <list>
+
+BasePrefetcher::BasePrefetcher(const BaseCacheParams *p)
+ : size(p->prefetcher_size), pageStop(!p->prefetch_past_page),
+ serialSquash(p->prefetch_serial_squash),
+ cacheCheckPush(p->prefetch_cache_check_push),
+ only_data(p->prefetch_data_accesses_only)
+{
+}
+
+void
+BasePrefetcher::setCache(BaseCache *_cache)
+{
+ cache = _cache;
+ blkSize = cache->getBlockSize();
+}
+
+void
+BasePrefetcher::regStats(const std::string &name)
+{
+ pfIdentified
+ .name(name + ".prefetcher.num_hwpf_identified")
+ .desc("number of hwpf identified")
+ ;
+
+ pfMSHRHit
+ .name(name + ".prefetcher.num_hwpf_already_in_mshr")
+ .desc("number of hwpf that were already in mshr")
+ ;
+
+ pfCacheHit
+ .name(name + ".prefetcher.num_hwpf_already_in_cache")
+ .desc("number of hwpf that were already in the cache")
+ ;
+
+ pfBufferHit
+ .name(name + ".prefetcher.num_hwpf_already_in_prefetcher")
+ .desc("number of hwpf that were already in the prefetch queue")
+ ;
+
+ pfRemovedFull
+ .name(name + ".prefetcher.num_hwpf_evicted")
+ .desc("number of hwpf removed due to no buffer left")
+ ;
+
+ pfRemovedMSHR
+ .name(name + ".prefetcher.num_hwpf_removed_MSHR_hit")
+ .desc("number of hwpf removed because MSHR allocated")
+ ;
+
+ pfIssued
+ .name(name + ".prefetcher.num_hwpf_issued")
+ .desc("number of hwpf issued")
+ ;
+
+ pfSpanPage
+ .name(name + ".prefetcher.num_hwpf_span_page")
+ .desc("number of hwpf spanning a virtual page")
+ ;
+
+ pfSquashed
+ .name(name + ".prefetcher.num_hwpf_squashed_from_miss")
+ .desc("number of hwpf that got squashed due to a miss aborting calculation time")
+ ;
+}
+
+inline bool
+BasePrefetcher::inCache(Addr addr)
+{
+ if (cache->inCache(addr)) {
+ pfCacheHit++;
+ return true;
+ }
+ return false;
+}
+
+inline bool
+BasePrefetcher::inMissQueue(Addr addr)
+{
+ if (cache->inMissQueue(addr)) {
+ pfMSHRHit++;
+ return true;
+ }
+ return false;
+}
+
+PacketPtr
+BasePrefetcher::getPacket()
+{
+ DPRINTF(HWPrefetch, "%s:Requesting a hw_pf to issue\n", cache->name());
+
+ if (pf.empty()) {
+ DPRINTF(HWPrefetch, "%s:No HW_PF found\n", cache->name());
+ return NULL;
+ }
+
+ PacketPtr pkt;
+ bool keepTrying = false;
+ do {
+ pkt = *pf.begin();
+ pf.pop_front();
+ if (!cacheCheckPush) {
+ keepTrying = cache->inCache(pkt->getAddr());
+ }
+ if (pf.empty()) {
+ cache->deassertMemSideBusRequest(BaseCache::Request_PF);
+ if (keepTrying) return NULL; //None left, all were in cache
+ }
+ } while (keepTrying);
+
+ pfIssued++;
+ return pkt;
+}
+
+void
+BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
+{
+ if (!pkt->req->isUncacheable() && !(pkt->req->isInstRead() && only_data))
+ {
+ //Calculate the blk address
+ Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize-1);
+
+ //Check if miss is in pfq, if so remove it
+ std::list<PacketPtr>::iterator iter = inPrefetch(blkAddr);
+ if (iter != pf.end()) {
+ DPRINTF(HWPrefetch, "%s:Saw a miss to a queued prefetch, removing it\n", cache->name());
+ pfRemovedMSHR++;
+ pf.erase(iter);
+ if (pf.empty())
+ cache->deassertMemSideBusRequest(BaseCache::Request_PF);
+ }
+
+ //Remove anything in queue with delay older than time
+ //since everything is inserted in time order, start from end
+ //and work until pf.empty() or time is earlier
+ //This is done to emulate Aborting the previous work on a new miss
+ //Needed for serial calculators like GHB
+ if (serialSquash) {
+ iter = pf.end();
+ iter--;
+ while (!pf.empty() && ((*iter)->time >= time)) {
+ pfSquashed++;
+ pf.pop_back();
+ iter--;
+ }
+ if (pf.empty())
+ cache->deassertMemSideBusRequest(BaseCache::Request_PF);
+ }
+
+
+ std::list<Addr> addresses;
+ std::list<Tick> delays;
+ calculatePrefetch(pkt, addresses, delays);
+
+ std::list<Addr>::iterator addr = addresses.begin();
+ std::list<Tick>::iterator delay = delays.begin();
+ while (addr != addresses.end())
+ {
+ DPRINTF(HWPrefetch, "%s:Found a pf canidate, inserting into prefetch queue\n", cache->name());
+ //temp calc this here...
+ pfIdentified++;
+ //create a prefetch memreq
+ Request * prefetchReq = new Request(*addr, blkSize, 0);
+ PacketPtr prefetch;
+ prefetch = new Packet(prefetchReq, MemCmd::HardPFReq, -1);
+ prefetch->allocate();
+ prefetch->req->setThreadContext(pkt->req->getCpuNum(),
+ pkt->req->getThreadNum());
+
+ prefetch->time = time + (*delay); //@todo ADD LATENCY HERE
+ //... initialize
+
+ //Check if it is already in the cache
+ if (cacheCheckPush) {
+ if (cache->inCache(prefetch->getAddr())) {
+ addr++;
+ delay++;
+ continue;
+ }
+ }
+
+ //Check if it is already in the miss_queue
+ if (cache->inMissQueue(prefetch->getAddr())) {
+ addr++;
+ delay++;
+ continue;
+ }
+
+ //Check if it is already in the pf buffer
+ if (inPrefetch(prefetch->getAddr()) != pf.end()) {
+ pfBufferHit++;
+ addr++;
+ delay++;
+ continue;
+ }
+
+ //We just remove the head if we are full
+ if (pf.size() == size)
+ {
+ DPRINTF(HWPrefetch, "%s:Inserting into prefetch queue, it was full removing oldest\n", cache->name());
+ pfRemovedFull++;
+ pf.pop_front();
+ }
+
+ pf.push_back(prefetch);
+
+ //Make sure to request the bus, with proper delay
+ cache->requestMemSideBus(BaseCache::Request_PF, prefetch->time);
+
+ //Increment through the list
+ addr++;
+ delay++;
+ }
+ }
+}
+
+std::list<PacketPtr>::iterator
+BasePrefetcher::inPrefetch(Addr address)
+{
+ //Guaranteed to only be one match, we always check before inserting
+ std::list<PacketPtr>::iterator iter;
+ for (iter=pf.begin(); iter != pf.end(); iter++) {
+ if (((*iter)->getAddr() & ~(Addr)(blkSize-1)) == address) {
+ return iter;
+ }
+ }
+ return pf.end();
+}
+
+
--- /dev/null
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Hardware prefetcher declaration.
+ */
+
+#ifndef __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
+#define __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
+
+#include <list>
+
+#include "base/statistics.hh"
+#include "mem/packet.hh"
+#include "params/BaseCache.hh"
+
+class BaseCache;
+
+class BasePrefetcher
+{
+ protected:
+
+ /** The Prefetch Queue. */
+ std::list<PacketPtr> pf;
+
+ // PARAMETERS
+
+ /** The maximum number of entries in the prefetch queue. */
+ const int size;
+
+ /** Pointer to the parent cache. */
+ BaseCache* cache;
+
+ /** The block size of the parent cache. */
+ int blkSize;
+
+ /** Do we stop prefetches that would cross a page boundary. */
+ bool pageStop;
+
+ /** Do we remove prefetches with later times than a new miss.*/
+ bool serialSquash;
+
+ /** Do we check whether the block is already in the cache when pushing
+ into the buffer (true) or when issuing from it (false). */
+ bool cacheCheckPush;
+
+ /** Do we prefetch on only data reads, or on inst reads as well. */
+ bool only_data;
+
+ public:
+
+ Stats::Scalar<> pfIdentified;
+ Stats::Scalar<> pfMSHRHit;
+ Stats::Scalar<> pfCacheHit;
+ Stats::Scalar<> pfBufferHit;
+ Stats::Scalar<> pfRemovedFull;
+ Stats::Scalar<> pfRemovedMSHR;
+ Stats::Scalar<> pfIssued;
+ Stats::Scalar<> pfSpanPage;
+ Stats::Scalar<> pfSquashed;
+
+ void regStats(const std::string &name);
+
+ public:
+ BasePrefetcher(const BaseCacheParams *p);
+
+ virtual ~BasePrefetcher() {}
+
+ void setCache(BaseCache *_cache);
+
+ void handleMiss(PacketPtr &pkt, Tick time);
+
+ bool inCache(Addr addr);
+
+ bool inMissQueue(Addr addr);
+
+ PacketPtr getPacket();
+
+ bool havePending()
+ {
+ return !pf.empty();
+ }
+
+ virtual void calculatePrefetch(PacketPtr &pkt,
+ std::list<Addr> &addresses,
+ std::list<Tick> &delays) = 0;
+
+ std::list<PacketPtr>::iterator inPrefetch(Addr address);
+};
+
+
+#endif //__MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
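For orientation, a minimal sketch of how a concrete prefetcher would plug into this interface is shown below. The class name NextLinePrefetcher and the fixed one-block lookahead are illustrative only; they are not part of this change, which adds the GHB, stride and tagged prefetchers further on.

#include "mem/cache/prefetch/base_prefetcher.hh"

class NextLinePrefetcher : public BasePrefetcher
{
  public:
    NextLinePrefetcher(const BaseCacheParams *p)
        : BasePrefetcher(p)
    {
    }

    // Invoked from handleMiss(): queue a single candidate, the block
    // immediately after the one that missed, with no extra delay.
    void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
                           std::list<Tick> &delays)
    {
        Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize - 1);
        addresses.push_back(blkAddr + blkSize);
        delays.push_back(0);
    }
};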
+++ /dev/null
-/*
- * Copyright (c) 2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Ron Dreslinski
- */
-
-/**
- * @file
- * Hardware Prefetcher Definition.
- */
-
-#include "base/trace.hh"
-#include "mem/cache/base_cache.hh"
-#include "mem/cache/prefetch/base_prefetcher.hh"
-#include "mem/request.hh"
-#include <list>
-
-BasePrefetcher::BasePrefetcher(const BaseCacheParams *p)
- : size(p->prefetcher_size), pageStop(!p->prefetch_past_page),
- serialSquash(p->prefetch_serial_squash),
- cacheCheckPush(p->prefetch_cache_check_push),
- only_data(p->prefetch_data_accesses_only)
-{
-}
-
-void
-BasePrefetcher::setCache(BaseCache *_cache)
-{
- cache = _cache;
- blkSize = cache->getBlockSize();
-}
-
-void
-BasePrefetcher::regStats(const std::string &name)
-{
- pfIdentified
- .name(name + ".prefetcher.num_hwpf_identified")
- .desc("number of hwpf identified")
- ;
-
- pfMSHRHit
- .name(name + ".prefetcher.num_hwpf_already_in_mshr")
- .desc("number of hwpf that were already in mshr")
- ;
-
- pfCacheHit
- .name(name + ".prefetcher.num_hwpf_already_in_cache")
- .desc("number of hwpf that were already in the cache")
- ;
-
- pfBufferHit
- .name(name + ".prefetcher.num_hwpf_already_in_prefetcher")
- .desc("number of hwpf that were already in the prefetch queue")
- ;
-
- pfRemovedFull
- .name(name + ".prefetcher.num_hwpf_evicted")
- .desc("number of hwpf removed due to no buffer left")
- ;
-
- pfRemovedMSHR
- .name(name + ".prefetcher.num_hwpf_removed_MSHR_hit")
- .desc("number of hwpf removed because MSHR allocated")
- ;
-
- pfIssued
- .name(name + ".prefetcher.num_hwpf_issued")
- .desc("number of hwpf issued")
- ;
-
- pfSpanPage
- .name(name + ".prefetcher.num_hwpf_span_page")
- .desc("number of hwpf spanning a virtual page")
- ;
-
- pfSquashed
- .name(name + ".prefetcher.num_hwpf_squashed_from_miss")
- .desc("number of hwpf that got squashed due to a miss aborting calculation time")
- ;
-}
-
-inline bool
-BasePrefetcher::inCache(Addr addr)
-{
- if (cache->inCache(addr)) {
- pfCacheHit++;
- return true;
- }
- return false;
-}
-
-inline bool
-BasePrefetcher::inMissQueue(Addr addr)
-{
- if (cache->inMissQueue(addr)) {
- pfMSHRHit++;
- return true;
- }
- return false;
-}
-
-PacketPtr
-BasePrefetcher::getPacket()
-{
- DPRINTF(HWPrefetch, "%s:Requesting a hw_pf to issue\n", cache->name());
-
- if (pf.empty()) {
- DPRINTF(HWPrefetch, "%s:No HW_PF found\n", cache->name());
- return NULL;
- }
-
- PacketPtr pkt;
- bool keepTrying = false;
- do {
- pkt = *pf.begin();
- pf.pop_front();
- if (!cacheCheckPush) {
- keepTrying = cache->inCache(pkt->getAddr());
- }
- if (pf.empty()) {
- cache->deassertMemSideBusRequest(BaseCache::Request_PF);
- if (keepTrying) return NULL; //None left, all were in cache
- }
- } while (keepTrying);
-
- pfIssued++;
- return pkt;
-}
-
-void
-BasePrefetcher::handleMiss(PacketPtr &pkt, Tick time)
-{
- if (!pkt->req->isUncacheable() && !(pkt->req->isInstRead() && only_data))
- {
- //Calculate the blk address
- Addr blkAddr = pkt->getAddr() & ~(Addr)(blkSize-1);
-
- //Check if miss is in pfq, if so remove it
- std::list<PacketPtr>::iterator iter = inPrefetch(blkAddr);
- if (iter != pf.end()) {
- DPRINTF(HWPrefetch, "%s:Saw a miss to a queued prefetch, removing it\n", cache->name());
- pfRemovedMSHR++;
- pf.erase(iter);
- if (pf.empty())
- cache->deassertMemSideBusRequest(BaseCache::Request_PF);
- }
-
- //Remove anything in queue with delay older than time
- //since everything is inserted in time order, start from end
- //and work until pf.empty() or time is earlier
- //This is done to emulate Aborting the previous work on a new miss
- //Needed for serial calculators like GHB
- if (serialSquash) {
- iter = pf.end();
- iter--;
- while (!pf.empty() && ((*iter)->time >= time)) {
- pfSquashed++;
- pf.pop_back();
- iter--;
- }
- if (pf.empty())
- cache->deassertMemSideBusRequest(BaseCache::Request_PF);
- }
-
-
- std::list<Addr> addresses;
- std::list<Tick> delays;
- calculatePrefetch(pkt, addresses, delays);
-
- std::list<Addr>::iterator addr = addresses.begin();
- std::list<Tick>::iterator delay = delays.begin();
- while (addr != addresses.end())
- {
- DPRINTF(HWPrefetch, "%s:Found a pf canidate, inserting into prefetch queue\n", cache->name());
- //temp calc this here...
- pfIdentified++;
- //create a prefetch memreq
- Request * prefetchReq = new Request(*addr, blkSize, 0);
- PacketPtr prefetch;
- prefetch = new Packet(prefetchReq, MemCmd::HardPFReq, -1);
- prefetch->allocate();
- prefetch->req->setThreadContext(pkt->req->getCpuNum(),
- pkt->req->getThreadNum());
-
- prefetch->time = time + (*delay); //@todo ADD LATENCY HERE
- //... initialize
-
- //Check if it is already in the cache
- if (cacheCheckPush) {
- if (cache->inCache(prefetch->getAddr())) {
- addr++;
- delay++;
- continue;
- }
- }
-
- //Check if it is already in the miss_queue
- if (cache->inMissQueue(prefetch->getAddr())) {
- addr++;
- delay++;
- continue;
- }
-
- //Check if it is already in the pf buffer
- if (inPrefetch(prefetch->getAddr()) != pf.end()) {
- pfBufferHit++;
- addr++;
- delay++;
- continue;
- }
-
- //We just remove the head if we are full
- if (pf.size() == size)
- {
- DPRINTF(HWPrefetch, "%s:Inserting into prefetch queue, it was full removing oldest\n", cache->name());
- pfRemovedFull++;
- pf.pop_front();
- }
-
- pf.push_back(prefetch);
-
- //Make sure to request the bus, with proper delay
- cache->requestMemSideBus(BaseCache::Request_PF, prefetch->time);
-
- //Increment through the list
- addr++;
- delay++;
- }
- }
-}
-
-std::list<PacketPtr>::iterator
-BasePrefetcher::inPrefetch(Addr address)
-{
- //Guaranteed to only be one match, we always check before inserting
- std::list<PacketPtr>::iterator iter;
- for (iter=pf.begin(); iter != pf.end(); iter++) {
- if (((*iter)->getAddr() & ~(Addr)(blkSize-1)) == address) {
- return iter;
- }
- }
- return pf.end();
-}
-
-
+++ /dev/null
-/*
- * Copyright (c) 2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Ron Dreslinski
- */
-
-/**
- * @file
- * Miss and writeback queue declarations.
- */
-
-#ifndef __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
-#define __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
-
-#include <list>
-
-#include "base/statistics.hh"
-#include "mem/packet.hh"
-#include "params/BaseCache.hh"
-
-class BaseCache;
-
-class BasePrefetcher
-{
- protected:
-
- /** The Prefetch Queue. */
- std::list<PacketPtr> pf;
-
- // PARAMETERS
-
- /** The number of MSHRs in the Prefetch Queue. */
- const int size;
-
- /** Pointr to the parent cache. */
- BaseCache* cache;
-
- /** The block size of the parent cache. */
- int blkSize;
-
- /** Do we prefetch across page boundaries. */
- bool pageStop;
-
- /** Do we remove prefetches with later times than a new miss.*/
- bool serialSquash;
-
- /** Do we check if it is in the cache when inserting into buffer,
- or removing.*/
- bool cacheCheckPush;
-
- /** Do we prefetch on only data reads, or on inst reads as well. */
- bool only_data;
-
- public:
-
- Stats::Scalar<> pfIdentified;
- Stats::Scalar<> pfMSHRHit;
- Stats::Scalar<> pfCacheHit;
- Stats::Scalar<> pfBufferHit;
- Stats::Scalar<> pfRemovedFull;
- Stats::Scalar<> pfRemovedMSHR;
- Stats::Scalar<> pfIssued;
- Stats::Scalar<> pfSpanPage;
- Stats::Scalar<> pfSquashed;
-
- void regStats(const std::string &name);
-
- public:
- BasePrefetcher(const BaseCacheParams *p);
-
- virtual ~BasePrefetcher() {}
-
- void setCache(BaseCache *_cache);
-
- void handleMiss(PacketPtr &pkt, Tick time);
-
- bool inCache(Addr addr);
-
- bool inMissQueue(Addr addr);
-
- PacketPtr getPacket();
-
- bool havePending()
- {
- return !pf.empty();
- }
-
- virtual void calculatePrefetch(PacketPtr &pkt,
- std::list<Addr> &addresses,
- std::list<Tick> &delays) = 0;
-
- std::list<PacketPtr>::iterator inPrefetch(Addr address);
-};
-
-
-#endif //__MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
--- /dev/null
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Steve Reinhardt
+ */
+
+/**
+ * @file
+ * GHB Prefetcher implementation.
+ */
+
+#include "mem/cache/prefetch/ghb_prefetcher.hh"
+#include "arch/isa_traits.hh"
+
+void
+GHBPrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
+ std::list<Tick> &delays)
+{
+ Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1);
+ int cpuID = pkt->req->getCpuNum();
+ if (!useCPUId) cpuID = 0;
+
+
+ int new_stride = blkAddr - last_miss_addr[cpuID];
+ int old_stride = last_miss_addr[cpuID] -
+ second_last_miss_addr[cpuID];
+
+ second_last_miss_addr[cpuID] = last_miss_addr[cpuID];
+ last_miss_addr[cpuID] = blkAddr;
+
+ if (new_stride == old_stride) {
+ for (int d=1; d <= degree; d++) {
+ Addr newAddr = blkAddr + d * new_stride;
+ if (this->pageStop &&
+ (blkAddr & ~(TheISA::VMPageSize - 1)) !=
+ (newAddr & ~(TheISA::VMPageSize - 1)))
+ {
+ //Spanned the page, so now stop
+ this->pfSpanPage += degree - d + 1;
+ return;
+ }
+ else
+ {
+ addresses.push_back(newAddr);
+ delays.push_back(latency);
+ }
+ }
+ }
+}
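The GHB prefetcher above only issues candidates when the delta between the last two per-CPU miss addresses matches the previous delta. A self-contained illustration of that check follows; the miss addresses and the degree of 2 are made up for the example.

#include <cstdint>
#include <cstdio>

typedef uint64_t Addr;

int main()
{
    const int degree = 2;
    Addr misses[] = { 0x1000, 0x1040, 0x1080 };  // three block-aligned misses

    int old_stride = int(misses[1] - misses[0]);
    int new_stride = int(misses[2] - misses[1]);

    // Matching strides: emit 'degree' candidates continuing the pattern.
    if (new_stride == old_stride) {
        for (int d = 1; d <= degree; d++)
            std::printf("candidate: 0x%llx\n",
                        (unsigned long long)(misses[2] + d * new_stride));
    }
    return 0;
}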
--- /dev/null
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Describes a GHB prefetcher.
+ */
+
+#ifndef __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
+#define __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
+
+#include "mem/cache/prefetch/base_prefetcher.hh"
+
+class GHBPrefetcher : public BasePrefetcher
+{
+ protected:
+
+ Addr second_last_miss_addr[64/*MAX_CPUS*/];
+ Addr last_miss_addr[64/*MAX_CPUS*/];
+
+ Tick latency;
+ int degree;
+ bool useCPUId;
+
+ public:
+
+ GHBPrefetcher(const BaseCacheParams *p)
+ : BasePrefetcher(p), latency(p->prefetch_latency),
+ degree(p->prefetch_degree), useCPUId(p->prefetch_use_cpu_id)
+ {
+ }
+
+ ~GHBPrefetcher() {}
+
+ void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
+ std::list<Tick> &delays);
+};
+
+#endif // __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
+++ /dev/null
-/*
- * Copyright (c) 2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Ron Dreslinski
- * Steve Reinhardt
- */
-
-/**
- * @file
- * GHB Prefetcher implementation.
- */
-
-#include "mem/cache/prefetch/ghb_prefetcher.hh"
-#include "arch/isa_traits.hh"
-
-void
-GHBPrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
- std::list<Tick> &delays)
-{
- Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1);
- int cpuID = pkt->req->getCpuNum();
- if (!useCPUId) cpuID = 0;
-
-
- int new_stride = blkAddr - last_miss_addr[cpuID];
- int old_stride = last_miss_addr[cpuID] -
- second_last_miss_addr[cpuID];
-
- second_last_miss_addr[cpuID] = last_miss_addr[cpuID];
- last_miss_addr[cpuID] = blkAddr;
-
- if (new_stride == old_stride) {
- for (int d=1; d <= degree; d++) {
- Addr newAddr = blkAddr + d * new_stride;
- if (this->pageStop &&
- (blkAddr & ~(TheISA::VMPageSize - 1)) !=
- (newAddr & ~(TheISA::VMPageSize - 1)))
- {
- //Spanned the page, so now stop
- this->pfSpanPage += degree - d + 1;
- return;
- }
- else
- {
- addresses.push_back(newAddr);
- delays.push_back(latency);
- }
- }
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Ron Dreslinski
- */
-
-/**
- * @file
- * Describes a ghb prefetcher.
- */
-
-#ifndef __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
-#define __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
-
-#include "mem/cache/prefetch/base_prefetcher.hh"
-
-class GHBPrefetcher : public BasePrefetcher
-{
- protected:
-
- Addr second_last_miss_addr[64/*MAX_CPUS*/];
- Addr last_miss_addr[64/*MAX_CPUS*/];
-
- Tick latency;
- int degree;
- bool useCPUId;
-
- public:
-
- GHBPrefetcher(const BaseCacheParams *p)
- : BasePrefetcher(p), latency(p->prefetch_latency),
- degree(p->prefetch_degree), useCPUId(p->prefetch_use_cpu_id)
- {
- }
-
- ~GHBPrefetcher() {}
-
- void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
- std::list<Tick> &delays);
-};
-
-#endif // __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
--- /dev/null
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Steve Reinhardt
+ */
+
+/**
+ * @file
+ * Stride Prefetcher implementation.
+ */
+
+#include "mem/cache/prefetch/stride_prefetcher.hh"
+
+void
+StridePrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
+ std::list<Tick> &delays)
+{
+// Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
+ int cpuID = pkt->req->getCpuNum();
+ if (!useCPUId) cpuID = 0;
+
+ /* Scan Table for IAddr Match */
+/* std::list<strideEntry*>::iterator iter;
+ for (iter=table[cpuID].begin();
+ iter !=table[cpuID].end();
+ iter++) {
+ if ((*iter)->IAddr == pkt->pc) break;
+ }
+
+ if (iter != table[cpuID].end()) {
+ //Hit in table
+
+ int newStride = blkAddr - (*iter)->MAddr;
+ if (newStride == (*iter)->stride) {
+ (*iter)->confidence++;
+ }
+ else {
+ (*iter)->stride = newStride;
+ (*iter)->confidence--;
+ }
+
+ (*iter)->MAddr = blkAddr;
+
+ for (int d=1; d <= degree; d++) {
+ Addr newAddr = blkAddr + d * newStride;
+ if (this->pageStop &&
+ (blkAddr & ~(TheISA::VMPageSize - 1)) !=
+ (newAddr & ~(TheISA::VMPageSize - 1)))
+ {
+ //Spanned the page, so now stop
+ this->pfSpanPage += degree - d + 1;
+ return;
+ }
+ else
+ {
+ addresses.push_back(newAddr);
+ delays.push_back(latency);
+ }
+ }
+ }
+ else {
+ //Miss in table
+ //Find lowest confidence and replace
+
+ }
+*/
+}
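Since the per-PC table walk in this file is still commented out, the sketch below outlines one hypothetical completion of it, kept as a free function for illustration. The strideEntry fields mirror the header that follows; the allocate-on-miss policy and the confidence threshold of zero are assumptions, not part of this change.

#include <cstdint>
#include <list>

typedef uint64_t Addr;

struct StrideEntry { Addr IAddr; Addr MAddr; int stride; int64_t confidence; };

// Returns true if the miss at instruction address pc, touching block
// blkAddr, should trigger stride prefetches; updates the matching entry
// the way the commented-out code above does.
bool updateStrideTable(std::list<StrideEntry> &table, Addr pc, Addr blkAddr)
{
    std::list<StrideEntry>::iterator it;
    for (it = table.begin(); it != table.end(); ++it) {
        if (it->IAddr != pc)
            continue;
        int newStride = int(blkAddr - it->MAddr);
        if (newStride == it->stride)
            it->confidence++;          // stride repeated: gain confidence
        else {
            it->stride = newStride;    // retrain on the new stride
            it->confidence--;
        }
        it->MAddr = blkAddr;
        return it->confidence > 0;     // assumed issue threshold
    }
    StrideEntry e = { pc, blkAddr, 0, 0 };   // table miss: allocate an entry
    table.push_back(e);
    return false;
}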
--- /dev/null
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Describes a strided prefetcher.
+ */
+
+#ifndef __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
+#define __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
+
+#include "mem/cache/prefetch/base_prefetcher.hh"
+
+class StridePrefetcher : public BasePrefetcher
+{
+ protected:
+
+ class strideEntry
+ {
+ public:
+ Addr IAddr;
+ Addr MAddr;
+ int stride;
+ int64_t confidence;
+
+/* bool operator < (strideEntry a,strideEntry b)
+ {
+ if (a.confidence == b.confidence) {
+ return true; //??????
+ }
+ else return a.confidence < b.confidence;
+ }*/
+ };
+ Addr* lastMissAddr[64/*MAX_CPUS*/];
+
+ std::list<strideEntry*> table[64/*MAX_CPUS*/];
+ Tick latency;
+ int degree;
+ bool useCPUId;
+
+
+ public:
+
+ StridePrefetcher(const BaseCacheParams *p)
+ : BasePrefetcher(p), latency(p->prefetch_latency),
+ degree(p->prefetch_degree), useCPUId(p->prefetch_use_cpu_id)
+ {
+ }
+
+ ~StridePrefetcher() {}
+
+ void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
+ std::list<Tick> &delays);
+};
+
+#endif // __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
+++ /dev/null
-/*
- * Copyright (c) 2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Ron Dreslinski
- * Steve Reinhardt
- */
-
-/**
- * @file
- * Stride Prefetcher template instantiations.
- */
-
-#include "mem/cache/prefetch/stride_prefetcher.hh"
-
-void
-StridePrefetcher::calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
- std::list<Tick> &delays)
-{
-// Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
- int cpuID = pkt->req->getCpuNum();
- if (!useCPUId) cpuID = 0;
-
- /* Scan Table for IAddr Match */
-/* std::list<strideEntry*>::iterator iter;
- for (iter=table[cpuID].begin();
- iter !=table[cpuID].end();
- iter++) {
- if ((*iter)->IAddr == pkt->pc) break;
- }
-
- if (iter != table[cpuID].end()) {
- //Hit in table
-
- int newStride = blkAddr - (*iter)->MAddr;
- if (newStride == (*iter)->stride) {
- (*iter)->confidence++;
- }
- else {
- (*iter)->stride = newStride;
- (*iter)->confidence--;
- }
-
- (*iter)->MAddr = blkAddr;
-
- for (int d=1; d <= degree; d++) {
- Addr newAddr = blkAddr + d * newStride;
- if (this->pageStop &&
- (blkAddr & ~(TheISA::VMPageSize - 1)) !=
- (newAddr & ~(TheISA::VMPageSize - 1)))
- {
- //Spanned the page, so now stop
- this->pfSpanPage += degree - d + 1;
- return;
- }
- else
- {
- addresses.push_back(newAddr);
- delays.push_back(latency);
- }
- }
- }
- else {
- //Miss in table
- //Find lowest confidence and replace
-
- }
-*/
-}
+++ /dev/null
-/*
- * Copyright (c) 2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Ron Dreslinski
- */
-
-/**
- * @file
- * Describes a strided prefetcher.
- */
-
-#ifndef __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
-#define __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
-
-#include "mem/cache/prefetch/base_prefetcher.hh"
-
-class StridePrefetcher : public BasePrefetcher
-{
- protected:
-
- class strideEntry
- {
- public:
- Addr IAddr;
- Addr MAddr;
- int stride;
- int64_t confidence;
-
-/* bool operator < (strideEntry a,strideEntry b)
- {
- if (a.confidence == b.confidence) {
- return true; //??????
- }
- else return a.confidence < b.confidence;
- }*/
- };
- Addr* lastMissAddr[64/*MAX_CPUS*/];
-
- std::list<strideEntry*> table[64/*MAX_CPUS*/];
- Tick latency;
- int degree;
- bool useCPUId;
-
-
- public:
-
- StridePrefetcher(const BaseCacheParams *p)
- : BasePrefetcher(p), latency(p->prefetch_latency),
- degree(p->prefetch_degree), useCPUId(p->prefetch_use_cpu_id)
- {
- }
-
- ~StridePrefetcher() {}
-
- void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
- std::list<Tick> &delays);
-};
-
-#endif // __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
--- /dev/null
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Tagged prefetcher implementation.
+ */
+
+#include "arch/isa_traits.hh"
+#include "mem/cache/prefetch/tagged_prefetcher.hh"
+
+TaggedPrefetcher::TaggedPrefetcher(const BaseCacheParams *p)
+ : BasePrefetcher(p),
+ latency(p->prefetch_latency), degree(p->prefetch_degree)
+{
+}
+
+void
+TaggedPrefetcher::
+calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
+ std::list<Tick> &delays)
+{
+ Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1);
+
+ for (int d=1; d <= degree; d++) {
+ Addr newAddr = blkAddr + d*(this->blkSize);
+ if (this->pageStop &&
+ (blkAddr & ~(TheISA::VMPageSize - 1)) !=
+ (newAddr & ~(TheISA::VMPageSize - 1)))
+ {
+ //Spanned the page, so now stop
+ this->pfSpanPage += degree - d + 1;
+ return;
+ }
+ else
+ {
+ addresses.push_back(newAddr);
+ delays.push_back(latency);
+ }
+ }
+}
+
+
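The degree loop above stops as soon as a candidate would leave the virtual page of the original miss. The following standalone check mirrors that test; the 8 KB page size stands in for TheISA::VMPageSize and, like the addresses, is an assumption for the demo.

#include <cstdint>
#include <cstdio>

typedef uint64_t Addr;

int main()
{
    const Addr pageSize = 8 * 1024;
    const Addr blkSize  = 64;
    const Addr blkAddr  = 0x1FC0;                // last block of the first page

    for (int d = 1; d <= 4; d++) {
        Addr newAddr = blkAddr + d * blkSize;
        bool samePage = (blkAddr & ~(pageSize - 1)) ==
                        (newAddr & ~(pageSize - 1));
        std::printf("degree %d: 0x%llx %s\n", d,
                    (unsigned long long)newAddr,
                    samePage ? "issued" : "dropped (crosses page)");
    }
    return 0;
}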
--- /dev/null
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Describes a tagged prefetcher.
+ */
+
+#ifndef __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
+#define __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
+
+#include "mem/cache/prefetch/base_prefetcher.hh"
+
+class TaggedPrefetcher : public BasePrefetcher
+{
+ protected:
+
+ Tick latency;
+ int degree;
+
+ public:
+
+ TaggedPrefetcher(const BaseCacheParams *p);
+
+ ~TaggedPrefetcher() {}
+
+ void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
+ std::list<Tick> &delays);
+};
+
+#endif // __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
+++ /dev/null
-/*
- * Copyright (c) 2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Ron Dreslinski
- */
-
-/**
- * @file
- * Describes a tagged prefetcher based on template policies.
- */
-
-#include "arch/isa_traits.hh"
-#include "mem/cache/prefetch/tagged_prefetcher.hh"
-
-TaggedPrefetcher::TaggedPrefetcher(const BaseCacheParams *p)
- : BasePrefetcher(p),
- latency(p->prefetch_latency), degree(p->prefetch_degree)
-{
-}
-
-void
-TaggedPrefetcher::
-calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
- std::list<Tick> &delays)
-{
- Addr blkAddr = pkt->getAddr() & ~(Addr)(this->blkSize-1);
-
- for (int d=1; d <= degree; d++) {
- Addr newAddr = blkAddr + d*(this->blkSize);
- if (this->pageStop &&
- (blkAddr & ~(TheISA::VMPageSize - 1)) !=
- (newAddr & ~(TheISA::VMPageSize - 1)))
- {
- //Spanned the page, so now stop
- this->pfSpanPage += degree - d + 1;
- return;
- }
- else
- {
- addresses.push_back(newAddr);
- delays.push_back(latency);
- }
- }
-}
-
-
+++ /dev/null
-/*
- * Copyright (c) 2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Ron Dreslinski
- */
-
-/**
- * @file
- * Describes a tagged prefetcher.
- */
-
-#ifndef __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
-#define __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
-
-#include "mem/cache/prefetch/base_prefetcher.hh"
-
-class TaggedPrefetcher : public BasePrefetcher
-{
- protected:
-
- Tick latency;
- int degree;
-
- public:
-
- TaggedPrefetcher(const BaseCacheParams *p);
-
- ~TaggedPrefetcher() {}
-
- void calculatePrefetch(PacketPtr &pkt, std::list<Addr> &addresses,
- std::list<Tick> &delays);
-};
-
-#endif // __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
+++ /dev/null
-from m5.SimObject import SimObject
-from m5.params import *
-class Repl(SimObject):
- type = 'Repl'
- abstract = True
-
-class GenRepl(Repl):
- type = 'GenRepl'
- fresh_res = Param.Int("Fresh pool residency time")
- num_pools = Param.Int("Number of priority pools")
- pool_res = Param.Int("Pool residency time")
Import('*')
-Source('base_tags.cc')
+Source('base.cc')
Source('fa_lru.cc')
Source('iic.cc')
Source('lru.cc')
Source('split_lifo.cc')
Source('split_lru.cc')
-SimObject('Repl.py')
-Source('repl/gen.cc')
+SimObject('iic_repl/Repl.py')
+Source('iic_repl/gen.cc')
TraceFlag('IIC')
TraceFlag('IICMore')
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ * Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Definitions of BaseTags.
+ */
+
+#include "mem/cache/tags/base_tags.hh"
+
+#include "mem/cache/base_cache.hh"
+#include "cpu/smt.hh" //maxThreadsPerCPU
+#include "sim/sim_exit.hh"
+
+using namespace std;
+
+void
+BaseTags::setCache(BaseCache *_cache)
+{
+ cache = _cache;
+ objName = cache->name();
+}
+
+void
+BaseTags::regStats(const string &name)
+{
+ using namespace Stats;
+ replacements
+ .init(maxThreadsPerCPU)
+ .name(name + ".replacements")
+ .desc("number of replacements")
+ .flags(total)
+ ;
+
+ tagsInUse
+ .name(name + ".tagsinuse")
+ .desc("Cycle average of tags in use")
+ ;
+
+ totalRefs
+ .name(name + ".total_refs")
+ .desc("Total number of references to valid blocks.")
+ ;
+
+ sampledRefs
+ .name(name + ".sampled_refs")
+ .desc("Sample count of references to valid blocks.")
+ ;
+
+ avgRefs
+ .name(name + ".avg_refs")
+ .desc("Average number of references to valid blocks.")
+ ;
+
+ avgRefs = totalRefs/sampledRefs;
+
+ warmupCycle
+ .name(name + ".warmup_cycle")
+ .desc("Cycle when the warmup percentage was hit.")
+ ;
+
+ registerExitCallback(new BaseTagsCallback(this));
+}
--- /dev/null
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ * Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Declaration of a common base class for cache tagstore objects.
+ */
+
+#ifndef __BASE_TAGS_HH__
+#define __BASE_TAGS_HH__
+
+#include <string>
+#include "base/statistics.hh"
+#include "base/callback.hh"
+
+class BaseCache;
+
+/**
+ * A common base class of Cache tagstore objects.
+ */
+class BaseTags
+{
+ protected:
+ /** Pointer to the parent cache. */
+ BaseCache *cache;
+
+ /** Local copy of the parent cache name. Used for DPRINTF. */
+ std::string objName;
+
+ /**
+ * The number of tags that need to be touched to meet the warmup
+ * percentage.
+ */
+ int warmupBound;
+ /** Marked true when the cache is warmed up. */
+ bool warmedUp;
+
+ // Statistics
+ /**
+ * @addtogroup CacheStatistics
+ * @{
+ */
+
+ /** Number of replacements of valid blocks per thread. */
+ Stats::Vector<> replacements;
+ /** Per cycle average of the number of tags that hold valid data. */
+ Stats::Average<> tagsInUse;
+
+ /** The total number of references to a block before it is replaced. */
+ Stats::Scalar<> totalRefs;
+
+ /**
+ * The number of reference counts sampled. This is different from
+ * replacements because we sample all the valid blocks when the simulator
+ * exits.
+ */
+ Stats::Scalar<> sampledRefs;
+
+ /**
+ * Average number of references to a block before it was replaced.
+ * @todo This should change to an average stat once we have them.
+ */
+ Stats::Formula avgRefs;
+
+ /** The cycle that the warmup percentage was hit. */
+ Stats::Scalar<> warmupCycle;
+ /**
+ * @}
+ */
+
+ public:
+
+ /**
+ * Destructor.
+ */
+ virtual ~BaseTags() {}
+
+ /**
+ * Set the parent cache back pointer. Also copies the cache name to
+ * objName.
+ * @param _cache Pointer to parent cache.
+ */
+ void setCache(BaseCache *_cache);
+
+ /**
+ * Return the parent cache name.
+ * @return the parent cache name.
+ */
+ const std::string &name() const
+ {
+ return objName;
+ }
+
+ /**
+ * Register local statistics.
+ * @param name The name to precede each statistic name.
+ */
+ void regStats(const std::string &name);
+
+ /**
+ * Average in the reference count for valid blocks when the simulation
+ * exits.
+ */
+ virtual void cleanupRefs() {}
+};
+
+class BaseTagsCallback : public Callback
+{
+ BaseTags *tags;
+ public:
+ BaseTagsCallback(BaseTags *t) : tags(t) {}
+ virtual void process() { tags->cleanupRefs(); };
+};
+
+#endif //__BASE_TAGS_HH__
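To show how the pieces above fit together, here is a minimal hypothetical tag store built on BaseTags. The class name, capacity and flat reference-count array are illustrative only; the real tag stores in this tree (LRU, FALRU, IIC, split variants) keep richer per-block state. cleanupRefs() is what BaseTagsCallback invokes at simulator exit so that avgRefs (= totalRefs / sampledRefs) also covers blocks that were never evicted.

#include "mem/cache/tags/base_tags.hh"

class ToyTags : public BaseTags
{
    static const int numBlocks = 1024;    // assumed capacity for the sketch
    int refCount[numBlocks];              // hits since each block was filled

  public:
    ToyTags()
    {
        for (int i = 0; i < numBlocks; ++i)
            refCount[i] = 0;
    }

    // Fold the still-valid blocks into the reference-count statistics when
    // the simulation exits; replacements only sample evicted blocks.
    virtual void cleanupRefs()
    {
        for (int i = 0; i < numBlocks; ++i) {
            totalRefs += refCount[i];
            sampledRefs++;
        }
    }
};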
+++ /dev/null
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- * Ron Dreslinski
- */
-
-/**
- * @file
- * Definitions of BaseTags.
- */
-
-#include "mem/cache/tags/base_tags.hh"
-
-#include "mem/cache/base_cache.hh"
-#include "cpu/smt.hh" //maxThreadsPerCPU
-#include "sim/sim_exit.hh"
-
-using namespace std;
-
-void
-BaseTags::setCache(BaseCache *_cache)
-{
- cache = _cache;
- objName = cache->name();
-}
-
-void
-BaseTags::regStats(const string &name)
-{
- using namespace Stats;
- replacements
- .init(maxThreadsPerCPU)
- .name(name + ".replacements")
- .desc("number of replacements")
- .flags(total)
- ;
-
- tagsInUse
- .name(name + ".tagsinuse")
- .desc("Cycle average of tags in use")
- ;
-
- totalRefs
- .name(name + ".total_refs")
- .desc("Total number of references to valid blocks.")
- ;
-
- sampledRefs
- .name(name + ".sampled_refs")
- .desc("Sample count of references to valid blocks.")
- ;
-
- avgRefs
- .name(name + ".avg_refs")
- .desc("Average number of references to valid blocks.")
- ;
-
- avgRefs = totalRefs/sampledRefs;
-
- warmupCycle
- .name(name + ".warmup_cycle")
- .desc("Cycle when the warmup percentage was hit.")
- ;
-
- registerExitCallback(new BaseTagsCallback(this));
-}
+++ /dev/null
-/*
- * Copyright (c) 2003-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- * Ron Dreslinski
- */
-
-/**
- * @file
- * Declaration of a common base class for cache tagstore objects.
- */
-
-#ifndef __BASE_TAGS_HH__
-#define __BASE_TAGS_HH__
-
-#include <string>
-#include "base/statistics.hh"
-#include "base/callback.hh"
-
-class BaseCache;
-
-/**
- * A common base class of Cache tagstore objects.
- */
-class BaseTags
-{
- protected:
- /** Pointer to the parent cache. */
- BaseCache *cache;
-
- /** Local copy of the parent cache name. Used for DPRINTF. */
- std::string objName;
-
- /**
- * The number of tags that need to be touched to meet the warmup
- * percentage.
- */
- int warmupBound;
- /** Marked true when the cache is warmed up. */
- bool warmedUp;
-
- // Statistics
- /**
- * @addtogroup CacheStatistics
- * @{
- */
-
- /** Number of replacements of valid blocks per thread. */
- Stats::Vector<> replacements;
- /** Per cycle average of the number of tags that hold valid data. */
- Stats::Average<> tagsInUse;
-
- /** The total number of references to a block before it is replaced. */
- Stats::Scalar<> totalRefs;
-
- /**
- * The number of reference counts sampled. This is different from
- * replacements because we sample all the valid blocks when the simulator
- * exits.
- */
- Stats::Scalar<> sampledRefs;
-
- /**
- * Average number of references to a block before is was replaced.
- * @todo This should change to an average stat once we have them.
- */
- Stats::Formula avgRefs;
-
- /** The cycle that the warmup percentage was hit. */
- Stats::Scalar<> warmupCycle;
- /**
- * @}
- */
-
- public:
-
- /**
- * Destructor.
- */
- virtual ~BaseTags() {}
-
- /**
- * Set the parent cache back pointer. Also copies the cache name to
- * objName.
- * @param _cache Pointer to parent cache.
- */
- void setCache(BaseCache *_cache);
-
- /**
- * Return the parent cache name.
- * @return the parent cache name.
- */
- const std::string &name() const
- {
- return objName;
- }
-
- /**
- * Register local statistics.
- * @param name The name to preceed each statistic name.
- */
- void regStats(const std::string &name);
-
- /**
- * Average in the reference count for valid blocks when the simulation
- * exits.
- */
- virtual void cleanupRefs() {}
-};
-
-class BaseTagsCallback : public Callback
-{
- BaseTags *tags;
- public:
- BaseTagsCallback(BaseTags *t) : tags(t) {}
- virtual void process() { tags->cleanupRefs(); };
-};
-
-#endif //__BASE_TAGS_HH__
--- /dev/null
+# Copyright (c) 2005-2008 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+from m5.SimObject import SimObject
+from m5.params import *
+class Repl(SimObject):
+ type = 'Repl'
+ abstract = True
+
+class GenRepl(Repl):
+ type = 'GenRepl'
+ fresh_res = Param.Int("Fresh pool residency time")
+ num_pools = Param.Int("Number of priority pools")
+ pool_res = Param.Int("Pool residency time")
--- /dev/null
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ * Steve Reinhardt
+ */
+
+/**
+ * @file
+ * Definitions of the Generational replacement policy.
+ */
+
+#include <string>
+
+#include "base/misc.hh"
+#include "mem/cache/tags/iic.hh"
+#include "mem/cache/tags/repl/gen.hh"
+#include "params/GenRepl.hh"
+#include "sim/host.hh"
+
+using namespace std;
+
+GenRepl::GenRepl(const Params *p) // fix this, should be set by cache
+ : Repl(p), num_pools(p->num_pools), fresh_res(p->fresh_res),
+ pool_res(p->pool_res), num_entries(0), num_pool_entries(0), misses(0),
+ pools(new GenPool[num_pools+1])
+{
+}
+
+GenRepl::~GenRepl()
+{
+ delete [] pools;
+}
+
+unsigned long
+GenRepl::getRepl()
+{
+ unsigned long tmp;
+ GenReplEntry *re;
+ int i;
+ int num_seen = 0;
+ if (!(num_pool_entries>0)) {
+ fatal("No blks available to replace");
+ }
+ num_entries--;
+ num_pool_entries--;
+ for (i = 0; i < num_pools; i++) {
+ while ((re = pools[i].pop())) {
+ num_seen++;
+ // Remove invalidated entries
+ if (!re->valid) {
+ delete re;
+ continue;
+ }
+ if (iic->clearRef(re->tag_ptr)) {
+ pools[(((i+1)== num_pools)? i :i+1)].push(re, misses);
+ }
+ else {
+ tmp = re->tag_ptr;
+ delete re;
+
+ repl_pool.sample(i);
+
+ return tmp;
+ }
+ }
+ }
+ fatal("No replacement found");
+ return 0xffffffff;
+}
+
+unsigned long *
+GenRepl::getNRepl(int n)
+{
+ unsigned long *tmp;
+ GenReplEntry *re;
+ int i;
+ if (!(num_pool_entries>(n-1))) {
+ fatal("Not enough blks available to replace");
+ }
+ num_entries -= n;
+ num_pool_entries -= n;
+ tmp = new unsigned long[n]; /* array of cache_blk pointers */
+ int blk_index = 0;
+ for (i = 0; i < num_pools && blk_index < n; i++) {
+ while (blk_index < n && (re = pools[i].pop())) {
+ // Remove invalidated entries
+ if (!re->valid) {
+ delete re;
+ continue;
+ }
+ if (iic->clearRef(re->tag_ptr)) {
+ pools[(((i+1)== num_pools)? i :i+1)].push(re, misses);
+ }
+ else {
+ tmp[blk_index] = re->tag_ptr;
+ blk_index++;
+ delete re;
+ repl_pool.sample(i);
+ }
+ }
+ }
+ if (blk_index >= n)
+ return tmp;
+ /* search the fresh pool */
+
+ fatal("No N replacements found");
+ return NULL;
+}
+
+void
+GenRepl::doAdvance(std::list<unsigned long> &demoted)
+{
+ int i;
+ int num_seen = 0;
+ GenReplEntry *re;
+ misses++;
+ for (i=0; i<num_pools; i++) {
+ while (misses-pools[i].oldest > pool_res && (re = pools[i].pop())!=NULL) {
+ if (iic->clearRef(re->tag_ptr)) {
+ pools[(((i+1)== num_pools)? i :i+1)].push(re, misses);
+ /** @todo Not really demoted, but use it for now. */
+ demoted.push_back(re->tag_ptr);
+ advance_pool.sample(i);
+ }
+ else {
+ pools[(((i-1)<0)?i:i-1)].push(re, misses);
+ demoted.push_back(re->tag_ptr);
+ demote_pool.sample(i);
+ }
+ }
+ num_seen += pools[i].size;
+ }
+ while (misses-pools[num_pools].oldest > fresh_res
+ && (re = pools[num_pools].pop())!=NULL) {
+ num_pool_entries++;
+ if (iic->clearRef(re->tag_ptr)) {
+ pools[num_pools/2].push(re, misses);
+ /** @todo Not really demoted, but use it for now. */
+ demoted.push_back(re->tag_ptr);
+ advance_pool.sample(num_pools);
+ }
+ else {
+ pools[num_pools/2-1].push(re, misses);
+ demoted.push_back(re->tag_ptr);
+ demote_pool.sample(num_pools);
+ }
+ }
+}
+
+void*
+GenRepl::add(unsigned long tag_index)
+{
+ GenReplEntry *re = new GenReplEntry;
+ re->tag_ptr = tag_index;
+ re->valid = true;
+ pools[num_pools].push(re, misses);
+ num_entries++;
+ return (void*)re;
+}
+
+void
+GenRepl::regStats(const string name)
+{
+ using namespace Stats;
+
+ /** GEN statistics */
+ repl_pool
+ .init(0, 16, 1)
+ .name(name + ".repl_pool_dist")
+ .desc("Dist. of Repl. across pools")
+ .flags(pdf)
+ ;
+
+ advance_pool
+ .init(0, 16, 1)
+ .name(name + ".advance_pool_dist")
+ .desc("Dist. of advances across pools")
+ .flags(pdf)
+ ;
+
+ demote_pool
+ .init(0, 16, 1)
+ .name(name + ".demote_pool_dist")
+ .desc("Dist. of demotions across pools")
+ .flags(pdf)
+ ;
+}
+
+int
+GenRepl::fixTag(void* _re, unsigned long old_index, unsigned long new_index)
+{
+ GenReplEntry *re = (GenReplEntry*)_re;
+ assert(re->valid);
+ if (re->tag_ptr == old_index) {
+ re->tag_ptr = new_index;
+ return 1;
+ }
+ fatal("Repl entry: tag ptrs do not match");
+ return 0;
+}
+
+bool
+GenRepl::findTagPtr(unsigned long index)
+{
+ for (int i = 0; i < num_pools + 1; ++i) {
+ list<GenReplEntry*>::const_iterator iter = pools[i].entries.begin();
+ list<GenReplEntry*>::const_iterator end = pools[i].entries.end();
+ for (; iter != end; ++iter) {
+ if ((*iter)->valid && (*iter)->tag_ptr == index) {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+GenRepl *
+GenReplParams::create()
+{
+ return new GenRepl(this);
+}
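The flow implemented above can be summarized as follows: add() places every new block in the fresh pool (index num_pools); on each miss, doAdvance() ages out entries whose residency has expired, promoting referenced entries one pool up, demoting unreferenced ones one pool down, and dropping fresh-pool entries into the middle pools; getRepl() then scans the regular pools from pool 0 upward and evicts the first entry whose reference bit is clear. The stand-alone snippet below is a hypothetical illustration of just the pool-index arithmetic used in doAdvance(); nextPool and the step loop are invented for illustration and are not part of the patch.

#include <iostream>

// Hypothetical helper mirroring the pool-index arithmetic in
// GenRepl::doAdvance(). Pools 0..num_pools-1 are the regular pools;
// pool num_pools is the fresh pool that newly added blocks start in.
int nextPool(int current, int num_pools, bool referenced)
{
    if (current == num_pools)                 // leaving the fresh pool
        return referenced ? num_pools / 2 : num_pools / 2 - 1;
    if (referenced)                           // promote, saturating at the top pool
        return (current + 1 == num_pools) ? current : current + 1;
    return (current - 1 < 0) ? current : current - 1;  // demote, saturating at pool 0
}

int main()
{
    const int num_pools = 8;
    // A block referenced every residency period climbs toward pool 7;
    // one that is never referenced falls toward pool 0, where getRepl()
    // will find it first.
    int hot = num_pools, cold = num_pools;
    for (int step = 0; step < 6; ++step) {
        hot = nextPool(hot, num_pools, true);
        cold = nextPool(cold, num_pools, false);
        std::cout << "step " << step << ": hot=" << hot
                  << " cold=" << cold << '\n';
    }
    return 0;
}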
--- /dev/null
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declarations of the generational replacement policy.
+ */
+
+#ifndef __GEN_HH__
+#define __GEN_HH__
+
+#include <list>
+
+#include "base/statistics.hh"
+#include "mem/cache/tags/repl/repl.hh"
+#include "params/GenRepl.hh"
+
+/**
+ * Generational Replacement entry.
+ */
+class GenReplEntry
+{
+ public:
+ /** Valid flag, used to quickly invalidate bogus entries. */
+ bool valid;
+ /** The insertion-time gap between this entry and the previous one in the pool. */
+ int delta;
+ /** Pointer to the corresponding tag in the IIC. */
+ unsigned long tag_ptr;
+};
+
+/**
+ * Generational replacement pool
+ */
+class GenPool
+{
+ public:
+ /** The time the last entry was added. */
+ Tick newest;
+ /** The time the oldest entry was added. */
+ Tick oldest;
+ /** List of the replacement entries in this pool. */
+ std::list<GenReplEntry*> entries;
+
+ /** The number of entries in this pool. */
+ int size;
+
+ /**
+ * Simple constructor.
+ */
+ GenPool() {
+ newest = 0;
+ oldest = 0;
+ size = 0;
+ }
+
+ /**
+ * Add an entry to this pool.
+ * @param re The entry to add.
+ * @param now The current time.
+ */
+ void push(GenReplEntry *re, Tick now) {
+ ++size;
+ if (!entries.empty()) {
+ re->delta = now - newest;
+ newest = now;
+ } else {
+ re->delta = 0;
+ newest = oldest = now;
+ }
+ entries.push_back(re);
+ }
+
+ /**
+ * Remove an entry from the pool.
+ * @return The entry at the front of the list.
+ */
+ GenReplEntry* pop() {
+ GenReplEntry *tmp = NULL;
+ if (!entries.empty()) {
+ --size;
+ tmp = entries.front();
+ entries.pop_front();
+ oldest += tmp->delta;
+ }
+ return tmp;
+ }
+
+ /**
+ * Return the entry at the front of the list.
+ * @return the entry at the front of the list.
+ */
+ GenReplEntry* top() {
+ return entries.front();
+ }
+
+ /**
+ * Destructor.
+ */
+ ~GenPool() {
+ while (!entries.empty()) {
+ GenReplEntry *tmp = entries.front();
+ entries.pop_front();
+ delete tmp;
+ }
+ }
+};
+
+/**
+ * Generational replacement policy for use with the IIC.
+ * @todo update to use STL and for efficiency
+ */
+class GenRepl : public Repl
+{
+ public:
+ /** The number of pools. */
+ int num_pools;
+ /** The amount of time to stay in the fresh pool. */
+ int fresh_res;
+ /** The amount of time to stay in the normal pools. */
+ int pool_res;
+ /** The total number of entries, including those still in the fresh pool. */
+ int num_entries;
+ /** The number of entries currently in the pools. */
+ int num_pool_entries;
+ /** The number of misses. Used as the internal time. */
+ Tick misses;
+ /** The array of pools. */
+ GenPool *pools;
+
+ // Statistics
+
+ /**
+ * @addtogroup CacheStatistics
+ * @{
+ */
+ /** The number of replacements from each pool. */
+ Stats::Distribution<> repl_pool;
+ /** The number of advances out of each pool. */
+ Stats::Distribution<> advance_pool;
+ /** The number of demotions from each pool. */
+ Stats::Distribution<> demote_pool;
+ /**
+ * @}
+ */
+
+ typedef GenReplParams Params;
+ GenRepl(const Params *p);
+
+ /**
+ * Destructor.
+ */
+ ~GenRepl();
+
+ /**
+ * Returns the tag pointer of the cache block to replace.
+ * @return The tag to replace.
+ */
+ virtual unsigned long getRepl();
+
+ /**
+ * Return an array of N tag pointers to replace.
+ * @param n The number of tag pointers to return.
+ * @return An array of tag pointers to replace.
+ */
+ virtual unsigned long *getNRepl(int n);
+
+ /**
+ * Update replacement data
+ */
+ virtual void doAdvance(std::list<unsigned long> &demoted);
+
+ /**
+ * Add a tag to the replacement policy and return a pointer to the
+ * replacement entry.
+ * @param tag_index The tag to add.
+ * @return The replacement entry.
+ */
+ virtual void* add(unsigned long tag_index);
+
+ /**
+ * Register statistics.
+ * @param name The name to prepend to each statistic name.
+ */
+ virtual void regStats(const std::string name);
+
+ /**
+ * Update the tag pointer when the tag moves.
+ * @param re The replacement entry of the tag.
+ * @param old_index The old tag pointer.
+ * @param new_index The new tag pointer.
+ * @return 1 if successful, 0 otherwise.
+ */
+ virtual int fixTag(void *re, unsigned long old_index,
+ unsigned long new_index);
+
+ /**
+ * Remove this entry from the replacement policy.
+ * @param re The replacement entry to remove
+ */
+ virtual void removeEntry(void *re)
+ {
+ ((GenReplEntry*)re)->valid = false;
+ }
+
+ protected:
+ /**
+ * Debug function that searches every pool for a valid entry with the
+ * given tag pointer.
+ * @param index The tag index to check.
+ * @return True if a matching valid entry is found.
+ */
+ bool findTagPtr(unsigned long index);
+};
+
+#endif /* __GEN_HH__ */
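The delta encoding in GenPool::push()/pop() is compact but easy to misread: each entry stores only the gap between its own insertion time and the previously pushed entry's, and oldest is advanced lazily on pop by the popped entry's own gap, so it reflects the time of the entry just removed rather than the new front. The stand-alone sketch below is a hypothetical trace of that bookkeeping; TracePool reimplements just the push/pop arithmetic with plain longs instead of the real GenReplEntry/Tick types and is not part of the patch.

#include <initializer_list>
#include <iostream>
#include <list>

// Stand-in for GenPool's timestamp bookkeeping: each entry records only
// the gap (delta) to the entry pushed before it, and the pool keeps two
// absolute times, newest and oldest.
struct TracePool {
    long newest = 0, oldest = 0;
    std::list<long> deltas;                 // front() belongs to the oldest entry

    void push(long now) {
        if (!deltas.empty()) {
            deltas.push_back(now - newest); // gap to the previous entry
        } else {
            deltas.push_back(0);
            oldest = now;                   // first entry defines oldest
        }
        newest = now;
    }

    void pop() {
        if (deltas.empty())
            return;
        oldest += deltas.front();           // lazy update, as in GenPool::pop()
        deltas.pop_front();
    }
};

int main() {
    TracePool p;
    for (long t : {3, 7, 12})                            // pushes at miss counts 3, 7, 12
        p.push(t);
    std::cout << p.oldest << ' ' << p.newest << '\n';    // 3 12
    p.pop();                                              // drops the entry pushed at t=3
    std::cout << p.oldest << ' ' << p.newest << '\n';    // 3 12 (oldest catches up on the next pop)
    p.pop();
    std::cout << p.oldest << ' ' << p.newest << '\n';    // 7 12
    return 0;
}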
--- /dev/null
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ * Steve Reinhardt
+ * Nathan Binkert
+ */
+
+/**
+ * @file
+ * Declaration of a base replacement policy class.
+ */
+
+#ifndef __REPL_HH__
+#define __REPL_HH__
+
+#include <string>
+#include <list>
+
+#include "cpu/smt.hh"
+#include "sim/host.hh"
+#include "sim/sim_object.hh"
+
+
+class IIC;
+
+/**
+ * A pure virtual base class that defines the interface of a replacement
+ * policy.
+ */
+class Repl : public SimObject
+{
+ public:
+ /** Pointer to the IIC using this policy. */
+ IIC *iic;
+
+ Repl (const Params *params)
+ : SimObject(params)
+ {
+ iic = NULL;
+ }
+
+ /**
+ * Set the back pointer to the IIC.
+ * @param iic_ptr Pointer to the IIC.
+ */
+ void setIIC(IIC *iic_ptr)
+ {
+ iic = iic_ptr;
+ }
+
+ /**
+ * Returns the tag pointer of the cache block to replace.
+ * @return The tag to replace.
+ */
+ virtual unsigned long getRepl() = 0;
+
+ /**
+ * Return an array of N tag pointers to replace.
+ * @param n The number of tag pointers to return.
+ * @return An array of tag pointers to replace.
+ */
+ virtual unsigned long *getNRepl(int n) = 0;
+
+ /**
+ * Update replacement data
+ */
+ virtual void doAdvance(std::list<unsigned long> &demoted) = 0;
+
+ /**
+ * Add a tag to the replacement policy and return a pointer to the
+ * replacement entry.
+ * @param tag_index The tag to add.
+ * @return The replacement entry.
+ */
+ virtual void* add(unsigned long tag_index) = 0;
+
+ /**
+ * Register statistics.
+ * @param name The name to prepend to each statistic name.
+ */
+ virtual void regStats(const std::string name) = 0;
+
+ /**
+ * Update the tag pointer when the tag moves.
+ * @param re The replacement entry of the tag.
+ * @param old_index The old tag pointer.
+ * @param new_index The new tag pointer.
+ * @return 1 if successful, 0 otherwise.
+ */
+ virtual int fixTag(void *re, unsigned long old_index,
+ unsigned long new_index) = 0;
+
+ /**
+ * Remove this entry from the replacement policy.
+ * @param re The replacement entry to remove
+ */
+ virtual void removeEntry(void *re) = 0;
+};
+
+#endif /* __REPL_HH__ */
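Repl is deliberately minimal: the tag store hands each inserted block to add() and keeps the returned handle, ticks the policy once per miss via doAdvance(), asks getRepl() for a victim when it needs space, and uses removeEntry()/fixTag() when blocks are invalidated or relocated. The self-contained sketch below illustrates that calling convention; ReplLike and FifoRepl are invented stand-ins that mirror the method signatures (the SimObject plumbing and the IIC back-pointer are omitted), not code from the patch.

#include <cassert>
#include <list>

// Stripped-down stand-in for the Repl interface.
class ReplLike {
  public:
    virtual ~ReplLike() {}
    virtual void *add(unsigned long tag_index) = 0;
    virtual unsigned long getRepl() = 0;
    virtual void doAdvance(std::list<unsigned long> &demoted) = 0;
    virtual void removeEntry(void *re) = 0;
};

// Trivial FIFO policy, standing in for GenRepl just to make the sketch run.
class FifoRepl : public ReplLike {
    struct Entry { unsigned long tag; bool valid; };
    std::list<Entry *> order;
  public:
    void *add(unsigned long tag_index) override {
        order.push_back(new Entry{tag_index, true});
        return order.back();                 // opaque handle kept by the caller
    }
    unsigned long getRepl() override {
        while (!order.empty()) {
            Entry *e = order.front();
            order.pop_front();
            if (e->valid) { unsigned long t = e->tag; delete e; return t; }
            delete e;                        // skip lazily-invalidated entries
        }
        return ~0UL;                         // no victim available
    }
    void doAdvance(std::list<unsigned long> &) override {}  // FIFO has no aging
    void removeEntry(void *re) override {
        static_cast<Entry *>(re)->valid = false;             // invalidate lazily
    }
};

int main() {
    FifoRepl policy;
    std::list<unsigned long> demoted;

    void *h0 = policy.add(0x10);             // tag store inserts two blocks
    policy.add(0x20);

    policy.doAdvance(demoted);               // called once per miss
    policy.removeEntry(h0);                  // block 0x10 invalidated

    // The invalidated entry is skipped, so 0x20 is chosen as the victim.
    assert(policy.getRepl() == 0x20);
    return 0;
}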
+++ /dev/null
-/*
- * Copyright (c) 2002-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- * Steve Reinhardt
- */
-
-/**
- * @file
- * Definitions of the Generational replacement policy.
- */
-
-#include <string>
-
-#include "base/misc.hh"
-#include "mem/cache/tags/iic.hh"
-#include "mem/cache/tags/repl/gen.hh"
-#include "params/GenRepl.hh"
-#include "sim/host.hh"
-
-using namespace std;
-
-GenRepl::GenRepl(const Params *p) // fix this, should be set by cache
- : Repl(p), num_pools(p->num_pools), fresh_res(p->fresh_res),
- pool_res(p->pool_res), num_entries(0), num_pool_entries(0), misses(0),
- pools(pools = new GenPool[num_pools+1])
-{
-}
-
-GenRepl::~GenRepl()
-{
- delete [] pools;
-}
-
-unsigned long
-GenRepl::getRepl()
-{
- unsigned long tmp;
- GenReplEntry *re;
- int i;
- int num_seen = 0;
- if (!(num_pool_entries>0)) {
- fatal("No blks available to replace");
- }
- num_entries--;
- num_pool_entries--;
- for (i = 0; i < num_pools; i++) {
- while ((re = pools[i].pop())) {
- num_seen++;
- // Remove invalidated entries
- if (!re->valid) {
- delete re;
- continue;
- }
- if (iic->clearRef(re->tag_ptr)) {
- pools[(((i+1)== num_pools)? i :i+1)].push(re, misses);
- }
- else {
- tmp = re->tag_ptr;
- delete re;
-
- repl_pool.sample(i);
-
- return tmp;
- }
- }
- }
- fatal("No replacement found");
- return 0xffffffff;
-}
-
-unsigned long *
-GenRepl::getNRepl(int n)
-{
- unsigned long *tmp;
- GenReplEntry *re;
- int i;
- if (!(num_pool_entries>(n-1))) {
- fatal("Not enough blks available to replace");
- }
- num_entries -= n;
- num_pool_entries -= n;
- tmp = new unsigned long[n]; /* array of cache_blk pointers */
- int blk_index = 0;
- for (i = 0; i < num_pools && blk_index < n; i++) {
- while (blk_index < n && (re = pools[i].pop())) {
- // Remove invalidated entries
- if (!re->valid) {
- delete re;
- continue;
- }
- if (iic->clearRef(re->tag_ptr)) {
- pools[(((i+1)== num_pools)? i :i+1)].push(re, misses);
- }
- else {
- tmp[blk_index] = re->tag_ptr;
- blk_index++;
- delete re;
- repl_pool.sample(i);
- }
- }
- }
- if (blk_index >= n)
- return tmp;
- /* search the fresh pool */
-
- fatal("No N replacements found");
- return NULL;
-}
-
-void
-GenRepl::doAdvance(std::list<unsigned long> &demoted)
-{
- int i;
- int num_seen = 0;
- GenReplEntry *re;
- misses++;
- for (i=0; i<num_pools; i++) {
- while (misses-pools[i].oldest > pool_res && (re = pools[i].pop())!=NULL) {
- if (iic->clearRef(re->tag_ptr)) {
- pools[(((i+1)== num_pools)? i :i+1)].push(re, misses);
- /** @todo Not really demoted, but use it for now. */
- demoted.push_back(re->tag_ptr);
- advance_pool.sample(i);
- }
- else {
- pools[(((i-1)<0)?i:i-1)].push(re, misses);
- demoted.push_back(re->tag_ptr);
- demote_pool.sample(i);
- }
- }
- num_seen += pools[i].size;
- }
- while (misses-pools[num_pools].oldest > fresh_res
- && (re = pools[num_pools].pop())!=NULL) {
- num_pool_entries++;
- if (iic->clearRef(re->tag_ptr)) {
- pools[num_pools/2].push(re, misses);
- /** @todo Not really demoted, but use it for now. */
- demoted.push_back(re->tag_ptr);
- advance_pool.sample(num_pools);
- }
- else {
- pools[num_pools/2-1].push(re, misses);
- demoted.push_back(re->tag_ptr);
- demote_pool.sample(num_pools);
- }
- }
-}
-
-void*
-GenRepl::add(unsigned long tag_index)
-{
- GenReplEntry *re = new GenReplEntry;
- re->tag_ptr = tag_index;
- re->valid = true;
- pools[num_pools].push(re, misses);
- num_entries++;
- return (void*)re;
-}
-
-void
-GenRepl::regStats(const string name)
-{
- using namespace Stats;
-
- /** GEN statistics */
- repl_pool
- .init(0, 16, 1)
- .name(name + ".repl_pool_dist")
- .desc("Dist. of Repl. across pools")
- .flags(pdf)
- ;
-
- advance_pool
- .init(0, 16, 1)
- .name(name + ".advance_pool_dist")
- .desc("Dist. of Repl. across pools")
- .flags(pdf)
- ;
-
- demote_pool
- .init(0, 16, 1)
- .name(name + ".demote_pool_dist")
- .desc("Dist. of Repl. across pools")
- .flags(pdf)
- ;
-}
-
-int
-GenRepl::fixTag(void* _re, unsigned long old_index, unsigned long new_index)
-{
- GenReplEntry *re = (GenReplEntry*)_re;
- assert(re->valid);
- if (re->tag_ptr == old_index) {
- re->tag_ptr = new_index;
- return 1;
- }
- fatal("Repl entry: tag ptrs do not match");
- return 0;
-}
-
-bool
-GenRepl::findTagPtr(unsigned long index)
-{
- for (int i = 0; i < num_pools + 1; ++i) {
- list<GenReplEntry*>::const_iterator iter = pools[i].entries.begin();
- list<GenReplEntry*>::const_iterator end = pools[i].entries.end();
- for (; iter != end; ++iter) {
- if ((*iter)->valid && (*iter)->tag_ptr == index) {
- return true;
- }
- }
- }
- return false;
-}
-
-GenRepl *
-GenReplParams::create()
-{
- return new GenRepl(this);
-}
+++ /dev/null
-/*
- * Copyright (c) 2002-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- */
-
-/**
- * @file
- * Declarations of generational replacement policy
- */
-
-#ifndef ___GEN_HH__
-#define __GEN_HH__
-
-#include <list>
-
-#include "base/statistics.hh"
-#include "mem/cache/tags/repl/repl.hh"
-#include "params/GenRepl.hh"
-
-/**
- * Generational Replacement entry.
- */
-class GenReplEntry
-{
- public:
- /** Valid flag, used to quickly invalidate bogus entries. */
- bool valid;
- /** The difference between this entry and the previous in the pool. */
- int delta;
- /** Pointer to the corresponding tag in the IIC. */
- unsigned long tag_ptr;
-};
-
-/**
- * Generational replacement pool
- */
-class GenPool
-{
- public:
- /** The time the last entry was added. */
- Tick newest;
- /** The time the oldest entry was added. */
- Tick oldest;
- /** List of the replacement entries in this pool. */
- std::list<GenReplEntry*> entries;
-
- /** The number of entries in this pool. */
- int size;
-
- /**
- * Simple constructor.
- */
- GenPool() {
- newest = 0;
- oldest = 0;
- size = 0;
- }
-
- /**
- * Add an entry to this pool.
- * @param re The entry to add.
- * @param now The current time.
- */
- void push(GenReplEntry *re, Tick now) {
- ++size;
- if (!entries.empty()) {
- re->delta = now - newest;
- newest = now;
- } else {
- re->delta = 0;
- newest = oldest = now;
- }
- entries.push_back(re);
- }
-
- /**
- * Remove an entry from the pool.
- * @return The entry at the front of the list.
- */
- GenReplEntry* pop() {
- GenReplEntry *tmp = NULL;
- if (!entries.empty()) {
- --size;
- tmp = entries.front();
- entries.pop_front();
- oldest += tmp->delta;
- }
- return tmp;
- }
-
- /**
- * Return the entry at the front of the list.
- * @return the entry at the front of the list.
- */
- GenReplEntry* top() {
- return entries.front();
- }
-
- /**
- * Destructor.
- */
- ~GenPool() {
- while (!entries.empty()) {
- GenReplEntry *tmp = entries.front();
- entries.pop_front();
- delete tmp;
- }
- }
-};
-
-/**
- * Generational replacement policy for use with the IIC.
- * @todo update to use STL and for efficiency
- */
-class GenRepl : public Repl
-{
- public:
- /** The number of pools. */
- int num_pools;
- /** The amount of time to stay in the fresh pool. */
- int fresh_res;
- /** The amount of time to stay in the normal pools. */
- int pool_res;
- /** The maximum number of entries */
- int num_entries;
- /** The number of entries currently in the pools. */
- int num_pool_entries;
- /** The number of misses. Used as the internal time. */
- Tick misses;
- /** The array of pools. */
- GenPool *pools;
-
- // Statistics
-
- /**
- * @addtogroup CacheStatistics
- * @{
- */
- /** The number of replacements from each pool. */
- Stats::Distribution<> repl_pool;
- /** The number of advances out of each pool. */
- Stats::Distribution<> advance_pool;
- /** The number of demotions from each pool. */
- Stats::Distribution<> demote_pool;
- /**
- * @}
- */
-
- typedef GenReplParams Params;
- GenRepl(const Params *p);
-
- /**
- * Destructor.
- */
- ~GenRepl();
-
- /**
- * Returns the tag pointer of the cache block to replace.
- * @return The tag to replace.
- */
- virtual unsigned long getRepl();
-
- /**
- * Return an array of N tag pointers to replace.
- * @param n The number of tag pointer to return.
- * @return An array of tag pointers to replace.
- */
- virtual unsigned long *getNRepl(int n);
-
- /**
- * Update replacement data
- */
- virtual void doAdvance(std::list<unsigned long> &demoted);
-
- /**
- * Add a tag to the replacement policy and return a pointer to the
- * replacement entry.
- * @param tag_index The tag to add.
- * @return The replacement entry.
- */
- virtual void* add(unsigned long tag_index);
-
- /**
- * Register statistics.
- * @param name The name to prepend to statistic descriptions.
- */
- virtual void regStats(const std::string name);
-
- /**
- * Update the tag pointer to when the tag moves.
- * @param re The replacement entry of the tag.
- * @param old_index The old tag pointer.
- * @param new_index The new tag pointer.
- * @return 1 if successful, 0 otherwise.
- */
- virtual int fixTag(void *re, unsigned long old_index,
- unsigned long new_index);
-
- /**
- * Remove this entry from the replacement policy.
- * @param re The replacement entry to remove
- */
- virtual void removeEntry(void *re)
- {
- ((GenReplEntry*)re)->valid = false;
- }
-
- protected:
- /**
- * Debug function to verify that there is only one repl entry per tag.
- * @param index The tag index to check.
- */
- bool findTagPtr(unsigned long index);
-};
-
-#endif /* __GEN_HH__ */
+++ /dev/null
-/*
- * Copyright (c) 2002-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- * Steve Reinhardt
- * Nathan Binkert
- */
-
-/**
- * @file
- * Declaration of a base replacement policy class.
- */
-
-#ifndef __REPL_HH__
-#define __REPL_HH__
-
-#include <string>
-#include <list>
-
-#include "cpu/smt.hh"
-#include "sim/host.hh"
-#include "sim/sim_object.hh"
-
-
-class IIC;
-
-/**
- * A pure virtual base class that defines the interface of a replacement
- * policy.
- */
-class Repl : public SimObject
-{
- public:
- /** Pointer to the IIC using this policy. */
- IIC *iic;
-
- Repl (const Params *params)
- : SimObject(params)
- {
- iic = NULL;
- }
-
- /**
- * Set the back pointer to the IIC.
- * @param iic_ptr Pointer to the IIC.
- */
- void setIIC(IIC *iic_ptr)
- {
- iic = iic_ptr;
- }
-
- /**
- * Returns the tag pointer of the cache block to replace.
- * @return The tag to replace.
- */
- virtual unsigned long getRepl() = 0;
-
- /**
- * Return an array of N tag pointers to replace.
- * @param n The number of tag pointer to return.
- * @return An array of tag pointers to replace.
- */
- virtual unsigned long *getNRepl(int n) = 0;
-
- /**
- * Update replacement data
- */
- virtual void doAdvance(std::list<unsigned long> &demoted) = 0;
-
- /**
- * Add a tag to the replacement policy and return a pointer to the
- * replacement entry.
- * @param tag_index The tag to add.
- * @return The replacement entry.
- */
- virtual void* add(unsigned long tag_index) = 0;
-
- /**
- * Register statistics.
- * @param name The name to prepend to statistic descriptions.
- */
- virtual void regStats(const std::string name) = 0;
-
- /**
- * Update the tag pointer to when the tag moves.
- * @param re The replacement entry of the tag.
- * @param old_index The old tag pointer.
- * @param new_index The new tag pointer.
- * @return 1 if successful, 0 otherwise.
- */
- virtual int fixTag(void *re, unsigned long old_index,
- unsigned long new_index) = 0;
-
- /**
- * Remove this entry from the replacement policy.
- * @param re The replacement entry to remove
- */
- virtual void removeEntry(void *re) = 0;
-};
-
-#endif /* SMT_REPL_HH */