Was having difficulty merging the cache; reverted to an early version and will apply patches and push.
author    Ron Dreslinski <rdreslin@umich.edu>
Wed, 28 Jun 2006 15:02:14 +0000 (11:02 -0400)
committer Ron Dreslinski <rdreslin@umich.edu>
Wed, 28 Jun 2006 15:02:14 +0000 (11:02 -0400)
src/mem/cache/prefetch/tagged_prefetcher_impl.hh:
    Trying to merge
src/mem/cache/base_cache.cc:
src/mem/cache/base_cache.hh:
src/mem/cache/cache.cc:
src/mem/cache/cache.hh:
src/mem/cache/cache_blk.hh:
src/mem/cache/cache_builder.cc:
src/mem/cache/cache_impl.hh:
src/mem/cache/coherence/coherence_protocol.cc:
src/mem/cache/coherence/coherence_protocol.hh:
src/mem/cache/coherence/simple_coherence.hh:
src/mem/cache/coherence/uni_coherence.cc:
src/mem/cache/coherence/uni_coherence.hh:
src/mem/cache/miss/blocking_buffer.cc:
src/mem/cache/miss/blocking_buffer.hh:
src/mem/cache/miss/miss_queue.cc:
src/mem/cache/miss/miss_queue.hh:
src/mem/cache/miss/mshr.cc:
src/mem/cache/miss/mshr.hh:
src/mem/cache/miss/mshr_queue.cc:
src/mem/cache/miss/mshr_queue.hh:
src/mem/cache/prefetch/base_prefetcher.cc:
src/mem/cache/prefetch/base_prefetcher.hh:
src/mem/cache/prefetch/ghb_prefetcher.cc:
src/mem/cache/prefetch/ghb_prefetcher.hh:
src/mem/cache/prefetch/stride_prefetcher.cc:
src/mem/cache/prefetch/stride_prefetcher.hh:
src/mem/cache/prefetch/tagged_prefetcher.hh:
src/mem/cache/tags/base_tags.cc:
src/mem/cache/tags/base_tags.hh:
src/mem/cache/tags/fa_lru.cc:
src/mem/cache/tags/fa_lru.hh:
src/mem/cache/tags/iic.cc:
src/mem/cache/tags/iic.hh:
src/mem/cache/tags/lru.cc:
src/mem/cache/tags/lru.hh:
src/mem/cache/tags/repl/gen.cc:
src/mem/cache/tags/repl/gen.hh:
src/mem/cache/tags/repl/repl.cc:
src/mem/cache/tags/repl/repl.hh:
src/mem/cache/tags/split.cc:
src/mem/cache/tags/split.hh:
src/mem/cache/tags/split_blk.hh:
src/mem/cache/tags/split_lifo.cc:
src/mem/cache/tags/split_lifo.hh:
src/mem/cache/tags/split_lru.cc:
src/mem/cache/tags/split_lru.hh:
    Pulling an early version of the cache into the tree due to merging issues.  Will apply patches and push.

--HG--
extra : convert_revision : 3276e5fb9a6272681a1690babf2b586dd0e1f380

47 files changed:
src/mem/cache/base_cache.cc [new file with mode: 0644]
src/mem/cache/base_cache.hh [new file with mode: 0644]
src/mem/cache/cache.cc [new file with mode: 0644]
src/mem/cache/cache.hh [new file with mode: 0644]
src/mem/cache/cache_blk.hh [new file with mode: 0644]
src/mem/cache/cache_builder.cc [new file with mode: 0644]
src/mem/cache/cache_impl.hh [new file with mode: 0644]
src/mem/cache/coherence/coherence_protocol.cc [new file with mode: 0644]
src/mem/cache/coherence/coherence_protocol.hh [new file with mode: 0644]
src/mem/cache/coherence/simple_coherence.hh [new file with mode: 0644]
src/mem/cache/coherence/uni_coherence.cc [new file with mode: 0644]
src/mem/cache/coherence/uni_coherence.hh [new file with mode: 0644]
src/mem/cache/miss/blocking_buffer.cc [new file with mode: 0644]
src/mem/cache/miss/blocking_buffer.hh [new file with mode: 0644]
src/mem/cache/miss/miss_queue.cc [new file with mode: 0644]
src/mem/cache/miss/miss_queue.hh [new file with mode: 0644]
src/mem/cache/miss/mshr.cc [new file with mode: 0644]
src/mem/cache/miss/mshr.hh [new file with mode: 0644]
src/mem/cache/miss/mshr_queue.cc [new file with mode: 0644]
src/mem/cache/miss/mshr_queue.hh [new file with mode: 0644]
src/mem/cache/prefetch/base_prefetcher.cc [new file with mode: 0644]
src/mem/cache/prefetch/base_prefetcher.hh [new file with mode: 0644]
src/mem/cache/prefetch/ghb_prefetcher.cc [new file with mode: 0644]
src/mem/cache/prefetch/ghb_prefetcher.hh [new file with mode: 0644]
src/mem/cache/prefetch/stride_prefetcher.cc [new file with mode: 0644]
src/mem/cache/prefetch/stride_prefetcher.hh [new file with mode: 0644]
src/mem/cache/prefetch/tagged_prefetcher.hh [new file with mode: 0644]
src/mem/cache/prefetch/tagged_prefetcher_impl.hh
src/mem/cache/tags/base_tags.cc [new file with mode: 0644]
src/mem/cache/tags/base_tags.hh [new file with mode: 0644]
src/mem/cache/tags/fa_lru.cc [new file with mode: 0644]
src/mem/cache/tags/fa_lru.hh [new file with mode: 0644]
src/mem/cache/tags/iic.cc [new file with mode: 0644]
src/mem/cache/tags/iic.hh [new file with mode: 0644]
src/mem/cache/tags/lru.cc [new file with mode: 0644]
src/mem/cache/tags/lru.hh [new file with mode: 0644]
src/mem/cache/tags/repl/gen.cc [new file with mode: 0644]
src/mem/cache/tags/repl/gen.hh [new file with mode: 0644]
src/mem/cache/tags/repl/repl.cc [new file with mode: 0644]
src/mem/cache/tags/repl/repl.hh [new file with mode: 0644]
src/mem/cache/tags/split.cc [new file with mode: 0644]
src/mem/cache/tags/split.hh [new file with mode: 0644]
src/mem/cache/tags/split_blk.hh [new file with mode: 0644]
src/mem/cache/tags/split_lifo.cc [new file with mode: 0644]
src/mem/cache/tags/split_lifo.hh [new file with mode: 0644]
src/mem/cache/tags/split_lru.cc [new file with mode: 0644]
src/mem/cache/tags/split_lru.hh [new file with mode: 0644]

diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
new file mode 100644 (file)
index 0000000..10a49ed
--- /dev/null
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Definition of BaseCache functions.
+ */
+
+#include "mem/cache/base_cache.hh"
+#include "cpu/smt.hh"
+#include "cpu/base.hh"
+
+using namespace std;
+
+BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
+                                bool _isCpuSide)
+    : Port(_name), cache(_cache), isCpuSide(_isCpuSide)
+{
+    blocked = false;
+}
+
+void
+BaseCache::CachePort::recvStatusChange(Port::Status status)
+{
+    cache->recvStatusChange(status, isCpuSide);
+}
+
+void
+BaseCache::CachePort::getDeviceAddressRanges(AddrRangeList &resp,
+                                       AddrRangeList &snoop)
+{
+    cache->getAddressRanges(resp, snoop);
+}
+
+int
+BaseCache::CachePort::deviceBlockSize()
+{
+    return cache->getBlockSize();
+}
+
+bool
+BaseCache::CachePort::recvTiming(Packet *pkt)
+{
+    return cache->doTimingAccess(pkt, this, isCpuSide);
+}
+
+Tick
+BaseCache::CachePort::recvAtomic(Packet *pkt)
+{
+    return cache->doAtomicAccess(pkt, isCpuSide);
+}
+
+void
+BaseCache::CachePort::recvFunctional(Packet *pkt)
+{
+    cache->doFunctionalAccess(pkt, isCpuSide);
+}
+
+void
+BaseCache::CachePort::setBlocked()
+{
+    blocked = true;
+}
+
+void
+BaseCache::CachePort::clearBlocked()
+{
+    blocked = false;
+}
+
+Port*
+BaseCache::getPort(const std::string &if_name)
+{
+    if(if_name == "cpu_side")
+    {
+        if(cpuSidePort != NULL)
+            panic("Already have a cpu side for this cache\n");
+        cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+        return cpuSidePort;
+    }
+    else if(if_name == "mem_side")
+    {
+        if(memSidePort != NULL)
+            panic("Already have a mem side for this cache\n");
+        memSidePort = new CachePort(name() + "-mem_side_port", this, false);
+        return memSidePort;
+    }
+    else panic("Port name %s unrecognized\n", if_name);
+}
+
+void
+BaseCache::regStats()
+{
+    using namespace Stats;
+
+    // Hit statistics
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        hits[access_idx]
+            .init(maxThreadsPerCPU)
+            .name(name() + "." + cstr + "_hits")
+            .desc("number of " + cstr + " hits")
+            .flags(total | nozero | nonan)
+            ;
+    }
+
+    demandHits
+        .name(name() + ".demand_hits")
+        .desc("number of demand (read+write) hits")
+        .flags(total)
+        ;
+    demandHits = hits[Read] + hits[Write];
+
+    overallHits
+        .name(name() + ".overall_hits")
+        .desc("number of overall hits")
+        .flags(total)
+        ;
+    overallHits = demandHits + hits[Soft_Prefetch] + hits[Hard_Prefetch]
+        + hits[Writeback];
+
+    // Miss statistics
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        misses[access_idx]
+            .init(maxThreadsPerCPU)
+            .name(name() + "." + cstr + "_misses")
+            .desc("number of " + cstr + " misses")
+            .flags(total | nozero | nonan)
+            ;
+    }
+
+    demandMisses
+        .name(name() + ".demand_misses")
+        .desc("number of demand (read+write) misses")
+        .flags(total)
+        ;
+    demandMisses = misses[Read] + misses[Write];
+
+    overallMisses
+        .name(name() + ".overall_misses")
+        .desc("number of overall misses")
+        .flags(total)
+        ;
+    overallMisses = demandMisses + misses[Soft_Prefetch] +
+        misses[Hard_Prefetch] + misses[Writeback];
+
+    // Miss latency statistics
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        missLatency[access_idx]
+            .init(maxThreadsPerCPU)
+            .name(name() + "." + cstr + "_miss_latency")
+            .desc("number of " + cstr + " miss cycles")
+            .flags(total | nozero | nonan)
+            ;
+    }
+
+    demandMissLatency
+        .name(name() + ".demand_miss_latency")
+        .desc("number of demand (read+write) miss cycles")
+        .flags(total)
+        ;
+    demandMissLatency = missLatency[Read] + missLatency[Write];
+
+    overallMissLatency
+        .name(name() + ".overall_miss_latency")
+        .desc("number of overall miss cycles")
+        .flags(total)
+        ;
+    overallMissLatency = demandMissLatency + missLatency[Soft_Prefetch] +
+        missLatency[Hard_Prefetch];
+
+    // access formulas
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        accesses[access_idx]
+            .name(name() + "." + cstr + "_accesses")
+            .desc("number of " + cstr + " accesses(hits+misses)")
+            .flags(total | nozero | nonan)
+            ;
+
+        accesses[access_idx] = hits[access_idx] + misses[access_idx];
+    }
+
+    demandAccesses
+        .name(name() + ".demand_accesses")
+        .desc("number of demand (read+write) accesses")
+        .flags(total)
+        ;
+    demandAccesses = demandHits + demandMisses;
+
+    overallAccesses
+        .name(name() + ".overall_accesses")
+        .desc("number of overall (read+write) accesses")
+        .flags(total)
+        ;
+    overallAccesses = overallHits + overallMisses;
+
+    // miss rate formulas
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        missRate[access_idx]
+            .name(name() + "." + cstr + "_miss_rate")
+            .desc("miss rate for " + cstr + " accesses")
+            .flags(total | nozero | nonan)
+            ;
+
+        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
+    }
+
+    demandMissRate
+        .name(name() + ".demand_miss_rate")
+        .desc("miss rate for demand accesses")
+        .flags(total)
+        ;
+    demandMissRate = demandMisses / demandAccesses;
+
+    overallMissRate
+        .name(name() + ".overall_miss_rate")
+        .desc("miss rate for overall accesses")
+        .flags(total)
+        ;
+    overallMissRate = overallMisses / overallAccesses;
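+    // Note: a Stats::Formula stores the expression rather than a value; it
+    // is evaluated when statistics are dumped, so overall_miss_rate is
+    // reported as the quotient of the two counters registered above.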
+
+    // miss latency formulas
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        avgMissLatency[access_idx]
+            .name(name() + "." + cstr + "_avg_miss_latency")
+            .desc("average " + cstr + " miss latency")
+            .flags(total | nozero | nonan)
+            ;
+
+        avgMissLatency[access_idx] =
+            missLatency[access_idx] / misses[access_idx];
+    }
+
+    demandAvgMissLatency
+        .name(name() + ".demand_avg_miss_latency")
+        .desc("average overall miss latency")
+        .flags(total)
+        ;
+    demandAvgMissLatency = demandMissLatency / demandMisses;
+
+    overallAvgMissLatency
+        .name(name() + ".overall_avg_miss_latency")
+        .desc("average overall miss latency")
+        .flags(total)
+        ;
+    overallAvgMissLatency = overallMissLatency / overallMisses;
+
+    blocked_cycles.init(NUM_BLOCKED_CAUSES);
+    blocked_cycles
+        .name(name() + ".blocked_cycles")
+        .desc("number of cycles access was blocked")
+        .subname(Blocked_NoMSHRs, "no_mshrs")
+        .subname(Blocked_NoTargets, "no_targets")
+        ;
+
+
+    blocked_causes.init(NUM_BLOCKED_CAUSES);
+    blocked_causes
+        .name(name() + ".blocked")
+        .desc("number of cycles access was blocked")
+        .subname(Blocked_NoMSHRs, "no_mshrs")
+        .subname(Blocked_NoTargets, "no_targets")
+        ;
+
+    avg_blocked
+        .name(name() + ".avg_blocked_cycles")
+        .desc("average number of cycles each access was blocked")
+        .subname(Blocked_NoMSHRs, "no_mshrs")
+        .subname(Blocked_NoTargets, "no_targets")
+        ;
+
+    avg_blocked = blocked_cycles / blocked_causes;
+
+    fastWrites
+        .name(name() + ".fast_writes")
+        .desc("number of fast writes performed")
+        ;
+
+    cacheCopies
+        .name(name() + ".cache_copies")
+        .desc("number of cache copies performed")
+        ;
+}
diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh
new file mode 100644 (file)
index 0000000..0170b02
--- /dev/null
@@ -0,0 +1,480 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declares a basic cache interface BaseCache.
+ */
+
+#ifndef __BASE_CACHE_HH__
+#define __BASE_CACHE_HH__
+
+#include <vector>
+#include <string>
+#include <list>
+#include <inttypes.h>
+
+#include "base/statistics.hh"
+#include "base/trace.hh"
+#include "mem/mem_object.hh"
+#include "mem/packet.hh"
+#include "mem/port.hh"
+#include "mem/request.hh"
+
+/**
+ * Reasons for Caches to be Blocked.
+ */
+enum BlockedCause{
+    Blocked_NoMSHRs,
+    Blocked_NoTargets,
+    Blocked_NoWBBuffers,
+    Blocked_Coherence,
+    Blocked_Copy,
+    NUM_BLOCKED_CAUSES
+};
+
+/**
+ * Reasons for cache to request a bus.
+ */
+enum RequestCause{
+    Request_MSHR,
+    Request_WB,
+    Request_Coherence,
+    Request_PF
+};
+
+/**
+ * A basic cache interface. Implements some common functions for speed.
+ */
+class BaseCache : public MemObject
+{
+    class CachePort : public Port
+    {
+        BaseCache *cache;
+
+      public:
+        CachePort(const std::string &_name, BaseCache *_cache, bool _isCpuSide);
+
+      private:
+        virtual bool recvTiming(Packet *pkt);
+
+        virtual Tick recvAtomic(Packet *pkt);
+
+        virtual void recvFunctional(Packet *pkt);
+
+        virtual void recvStatusChange(Status status);
+
+        virtual void getDeviceAddressRanges(AddrRangeList &resp,
+                                            AddrRangeList &snoop);
+
+        virtual int deviceBlockSize();
+
+        void setBlocked();
+
+        void clearBlocked();
+
+        bool blocked;
+
+        bool isCpuSide;
+    };
+
+    struct CacheEvent : public Event
+    {
+        Packet *pkt;
+        CachePort *cachePort;
+
+        CacheEvent(Packet *pkt, CachePort *cachePort);
+        void process();
+        const char *description();
+    };
+
+  protected:
+    CachePort *cpuSidePort;
+    CachePort *memSidePort;
+
+  public:
+    virtual Port *getPort(const std::string &if_name);
+
+  private:
+    // To be defined in cache_impl.hh, not in the base class.
+    virtual bool doTimingAccess(Packet *pkt, CachePort *cachePort, bool isCpuSide);
+    virtual Tick doAtomicAccess(Packet *pkt, bool isCpuSide);
+    virtual void doFunctionalAccess(Packet *pkt, bool isCpuSide);
+    virtual void recvStatusChange(Port::Status status, bool isCpuSide);
+
+    /**
+     * Bit vector of the blocking reasons for the access path.
+     * @sa #BlockedCause
+     */
+    uint8_t blocked;
+
+    /**
+     * Bit vector for the blocking reasons for the snoop path.
+     * @sa #BlockedCause
+     */
+    uint8_t blockedSnoop;
+
+    /**
+     * Bit vector for the outstanding requests for the master interface.
+     */
+    uint8_t masterRequests;
+
+    /**
+     * Bit vector for the outstanding requests for the slave interface.
+     */
+    uint8_t slaveRequests;
+
+  protected:
+
+    /** True if this cache is connected to the CPU. */
+    bool topLevelCache;
+
+    /** Stores time the cache blocked for statistics. */
+    Tick blockedCycle;
+
+    /** Block size of this cache */
+    const int blkSize;
+
+    /** The number of misses to trigger an exit event. */
+    Counter missCount;
+
+  public:
+    // Statistics
+    /**
+     * @addtogroup CacheStatistics
+     * @{
+     */
+
+    /** Number of hits per thread for each type of command. @sa Packet::Command */
+    Stats::Vector<> hits[NUM_MEM_CMDS];
+    /** Number of hits for demand accesses. */
+    Stats::Formula demandHits;
+    /** Number of hits for all accesses. */
+    Stats::Formula overallHits;
+
+    /** Number of misses per thread for each type of command. @sa Packet::Command */
+    Stats::Vector<> misses[NUM_MEM_CMDS];
+    /** Number of misses for demand accesses. */
+    Stats::Formula demandMisses;
+    /** Number of misses for all accesses. */
+    Stats::Formula overallMisses;
+
+    /**
+     * Total number of cycles per thread/command spent waiting for a miss.
+     * Used to calculate the average miss latency.
+     */
+    Stats::Vector<> missLatency[NUM_MEM_CMDS];
+    /** Total number of cycles spent waiting for demand misses. */
+    Stats::Formula demandMissLatency;
+    /** Total number of cycles spent waiting for all misses. */
+    Stats::Formula overallMissLatency;
+
+    /** The number of accesses per command and thread. */
+    Stats::Formula accesses[NUM_MEM_CMDS];
+    /** The number of demand accesses. */
+    Stats::Formula demandAccesses;
+    /** The number of overall accesses. */
+    Stats::Formula overallAccesses;
+
+    /** The miss rate per command and thread. */
+    Stats::Formula missRate[NUM_MEM_CMDS];
+    /** The miss rate of all demand accesses. */
+    Stats::Formula demandMissRate;
+    /** The miss rate for all accesses. */
+    Stats::Formula overallMissRate;
+
+    /** The average miss latency per command and thread. */
+    Stats::Formula avgMissLatency[NUM_MEM_CMDS];
+    /** The average miss latency for demand misses. */
+    Stats::Formula demandAvgMissLatency;
+    /** The average miss latency for all misses. */
+    Stats::Formula overallAvgMissLatency;
+
+    /** The total number of cycles blocked for each blocked cause. */
+    Stats::Vector<> blocked_cycles;
+    /** The number of times this cache blocked for each blocked cause. */
+    Stats::Vector<> blocked_causes;
+
+    /** The average number of cycles blocked for each blocked cause. */
+    Stats::Formula avg_blocked;
+
+    /** The number of fast writes (WH64) performed. */
+    Stats::Scalar<> fastWrites;
+
+    /** The number of cache copies performed. */
+    Stats::Scalar<> cacheCopies;
+
+    /**
+     * @}
+     */
+
+    /**
+     * Register stats for this object.
+     */
+    virtual void regStats();
+
+  public:
+
+    class Params
+    {
+      public:
+        /** List of address ranges of this cache. */
+        std::vector<Range<Addr> > addrRange;
+        /** The hit latency for this cache. */
+        int hitLatency;
+        /** The block size of this cache. */
+        int blkSize;
+        /**
+         * The maximum number of misses this cache should handle before
+         * ending the simulation.
+         */
+        Counter maxMisses;
+
+        /**
+         * Construct an instance of this parameter class.
+         */
+        Params(std::vector<Range<Addr> > addr_range,
+               int hit_latency, int _blkSize, Counter max_misses)
+            : addrRange(addr_range), hitLatency(hit_latency), blkSize(_blkSize),
+              maxMisses(max_misses)
+        {
+        }
+    };
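+
+    // A minimal construction sketch (values illustrative, not from this
+    // commit): a cache with a 1-cycle hit latency, 64-byte blocks, and no
+    // miss-count exit would build its parameters as
+    //     BaseCache::Params p(addr_ranges, 1, 64, 0);
+    // where addr_ranges is a std::vector<Range<Addr> > naming the ranges
+    // this cache serves.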
+
+    /**
+     * Create and initialize a basic cache object.
+     * @param name The name of this cache.
+     * @param hier_params Pointer to the HierParams object for this hierarchy
+     * of this cache.
+     * @param params The parameter object for this BaseCache.
+     */
+    BaseCache(const std::string &name, HierParams *hier_params, Params &params)
+        : MemObject(name),
+          // Start both ports at NULL; getPort() panics if a second port of
+          // either type is requested.
+          cpuSidePort(NULL), memSidePort(NULL),
+          blocked(0), blockedSnoop(0), masterRequests(0), slaveRequests(0),
+          topLevelCache(false), blkSize(params.blkSize),
+          missCount(params.maxMisses)
+    {
+    }
+
+    /**
+     * Query block size of a cache.
+     * @return  The block size
+     */
+    int getBlockSize() const
+    {
+        return blkSize;
+    }
+
+    /**
+     * Returns true if this cache is connected to the CPU.
+     * @return True if this is an L1 cache.
+     */
+    bool isTopLevel()
+    {
+        return topLevelCache;
+    }
+
+    /**
+     * Returns true if the cache is blocked for accesses.
+     */
+    bool isBlocked()
+    {
+        return blocked != 0;
+    }
+
+    /**
+     * Returns true if the cache is blocked for snoops.
+     */
+    bool isBlockedForSnoop()
+    {
+        return blockedSnoop != 0;
+    }
+
+    /**
+     * Marks the access path of the cache as blocked for the given cause. This
+     * also sets the blocked flag in the slave interface.
+     * @param cause The reason for the cache blocking.
+     */
+    void setBlocked(BlockedCause cause)
+    {
+        uint8_t flag = 1 << cause;
+        if (blocked == 0) {
+            blocked_causes[cause]++;
+            blockedCycle = curTick;
+        }
+        blocked |= flag;
+        DPRINTF(Cache,"Blocking for cause %s\n", cause);
+        cpuSidePort->setBlocked();
+    }
+
+    /**
+     * Marks the snoop path of the cache as blocked for the given cause. This
+     * also sets the blocked flag in the master interface.
+     * @param cause The reason to block the snoop path.
+     */
+    void setBlockedForSnoop(BlockedCause cause)
+    {
+        uint8_t flag = 1 << cause;
+        blockedSnoop |= flag;
+        memSidePort->setBlocked();
+    }
+
+    /**
+     * Marks the cache as unblocked for the given cause. This also clears the
+     * blocked flags in the appropriate interfaces.
+     * @param cause The newly unblocked cause.
+     * @warning Calling this function can cause a blocked request on the bus to
+     * access the cache. The cache must be in a state to handle that request.
+     */
+    void clearBlocked(BlockedCause cause)
+    {
+        uint8_t flag = 1 << cause;
+        blocked &= ~flag;
+        blockedSnoop &= ~flag;
+        DPRINTF(Cache,"Unblocking for cause %s, causes left=%i\n",
+                cause, blocked);
+        if (!isBlocked()) {
+            blocked_cycles[cause] += curTick - blockedCycle;
+            DPRINTF(Cache,"Unblocking from all causes\n");
+            cpuSidePort->clearBlocked();
+        }
+        if (!isBlockedForSnoop()) {
+           memSidePort->clearBlocked();
+        }
+
+    }
+
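+    // Illustrative lifecycle of the blocking bit vector (a sketch, not new
+    // API): a miss handler that runs out of MSHRs would call
+    //     setBlocked(Blocked_NoMSHRs);   // set the cause bit, block cpu side
+    // and, once an MSHR is freed again,
+    //     clearBlocked(Blocked_NoMSHRs); // clear the bit; the ports unblock
+    //                                    // only when no cause bits remain
+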
+    /**
+     * True if the master bus should be requested.
+     * @return True if there are outstanding requests for the master bus.
+     */
+    bool doMasterRequest()
+    {
+        return masterRequests != 0;
+    }
+
+    /**
+     * Request the master bus for the given cause and time.
+     * @param cause The reason for the request.
+     * @param time The time to make the request.
+     */
+    void setMasterRequest(RequestCause cause, Tick time)
+    {
+        uint8_t flag = 1<<cause;
+        masterRequests |= flag;
+        assert("Implement\n" && 0);
+//     mi->request(time);
+    }
+
+    /**
+     * Clear the master bus request for the given cause.
+     * @param cause The request reason to clear.
+     */
+    void clearMasterRequest(RequestCause cause)
+    {
+        uint8_t flag = 1<<cause;
+        masterRequests &= ~flag;
+    }
+
+    /**
+     * Return true if the slave bus should be requested.
+     * @return True if there are outstanding requests for the slave bus.
+     */
+    bool doSlaveRequest()
+    {
+        return slaveRequests != 0;
+    }
+
+    /**
+     * Request the slave bus for the given reason and time.
+     * @param cause The reason for the request.
+     * @param time The time to make the request.
+     */
+    void setSlaveRequest(RequestCause cause, Tick time)
+    {
+        uint8_t flag = 1<<cause;
+        slaveRequests |= flag;
+        assert("Implement\n" && 0);
+//     si->request(time);
+    }
+
+    /**
+     * Clear the slave bus request for the given reason.
+     * @param cause The request reason to clear.
+     */
+    void clearSlaveRequest(RequestCause cause)
+    {
+        uint8_t flag = 1<<cause;
+        slaveRequests &= ~flag;
+    }
+
+    /**
+     * Send a response to the slave interface.
+     * @param pkt The request being responded to.
+     * @param time The time the response is ready.
+     */
+    void respond(Packet *pkt, Tick time)
+    {
+        assert("Implement\n" && 0);
+//     si->respond(pkt,time);
+    }
+
+    /**
+     * Send a response to the slave interface and calculate miss latency.
+     * @param pkt The request to respond to.
+     * @param time The time the response is ready.
+     */
+    void respondToMiss(Packet *pkt, Tick time)
+    {
+        if (!pkt->isUncacheable()) {
+            missLatency[pkt->cmd.toIndex()][pkt->thread_num] += time - pkt->time;
+        }
+        assert("Implement\n" && 0);
+//     si->respond(pkt,time);
+    }
+
+    /**
+     * Supplies the data if cache-to-cache transfers are enabled.
+     * @param pkt The bus transaction to fulfill.
+     */
+    void respondToSnoop(Packet *pkt)
+    {
+        assert("Implement\n" && 0);
+//     mi->respond(pkt,curTick + hitLatency);
+    }
+
+    /**
+     * Notification from the master interface that an address range changed.
+     * Nothing
+     * to do for a cache.
+     */
+    void rangeChange() {}
+};
+
+#endif //__BASE_CACHE_HH__
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
new file mode 100644 (file)
index 0000000..db66c09
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Steve Reinhardt
+ *          Lisa Hsu
+ *          Kevin Lim
+ */
+
+/**
+ * @file
+ * Cache template instantiations.
+ */
+
+#include "mem/config/cache.hh"
+#include "mem/config/compression.hh"
+
+#include "mem/cache/tags/cache_tags.hh"
+
+#if defined(USE_CACHE_LRU)
+#include "mem/cache/tags/lru.hh"
+#endif
+
+#if defined(USE_CACHE_FALRU)
+#include "mem/cache/tags/fa_lru.hh"
+#endif
+
+#if defined(USE_CACHE_IIC)
+#include "mem/cache/tags/iic.hh"
+#endif
+
+#if defined(USE_CACHE_SPLIT)
+#include "mem/cache/tags/split.hh"
+#endif
+
+#if defined(USE_CACHE_SPLIT_LIFO)
+#include "mem/cache/tags/split_lifo.hh"
+#endif
+
+#include "base/compression/null_compression.hh"
+#if defined(USE_LZSS_COMPRESSION)
+#include "base/compression/lzss_compression.hh"
+#endif
+
+#include "mem/cache/miss/miss_queue.hh"
+#include "mem/cache/miss/blocking_buffer.hh"
+
+#include "mem/cache/coherence/uni_coherence.hh"
+#include "mem/cache/coherence/simple_coherence.hh"
+
+#include "mem/cache/cache_impl.hh"
+
+// Template Instantiations
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+
+#if defined(USE_CACHE_FALRU)
+template class Cache<CacheTags<FALRU,NullCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<FALRU,NullCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<FALRU,NullCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<FALRU,NullCompression>, MissQueue, UniCoherence>;
+#if defined(USE_LZSS_COMPRESSION)
+template class Cache<CacheTags<FALRU,LZSSCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<FALRU,LZSSCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<FALRU,LZSSCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<FALRU,LZSSCompression>, MissQueue, UniCoherence>;
+#endif
+#endif
+
+#if defined(USE_CACHE_IIC)
+template class Cache<CacheTags<IIC,NullCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<IIC,NullCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<IIC,NullCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<IIC,NullCompression>, MissQueue, UniCoherence>;
+#if defined(USE_LZSS_COMPRESSION)
+template class Cache<CacheTags<IIC,LZSSCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<IIC,LZSSCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<IIC,LZSSCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<IIC,LZSSCompression>, MissQueue, UniCoherence>;
+#endif
+#endif
+
+#if defined(USE_CACHE_LRU)
+template class Cache<CacheTags<LRU,NullCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<LRU,NullCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<LRU,NullCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<LRU,NullCompression>, MissQueue, UniCoherence>;
+#if defined(USE_LZSS_COMPRESSION)
+template class Cache<CacheTags<LRU,LZSSCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<LRU,LZSSCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<LRU,LZSSCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<LRU,LZSSCompression>, MissQueue, UniCoherence>;
+#endif
+#endif
+
+#if defined(USE_CACHE_SPLIT)
+template class Cache<CacheTags<Split,NullCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<Split,NullCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<Split,NullCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<Split,NullCompression>, MissQueue, UniCoherence>;
+#if defined(USE_LZSS_COMPRESSION)
+template class Cache<CacheTags<Split,LZSSCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<Split,LZSSCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<Split,LZSSCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<Split,LZSSCompression>, MissQueue, UniCoherence>;
+#endif
+#endif
+
+#if defined(USE_CACHE_SPLIT_LIFO)
+template class Cache<CacheTags<SplitLIFO,NullCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<SplitLIFO,NullCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<SplitLIFO,NullCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<SplitLIFO,NullCompression>, MissQueue, UniCoherence>;
+#if defined(USE_LZSS_COMPRESSION)
+template class Cache<CacheTags<SplitLIFO,LZSSCompression>, BlockingBuffer, SimpleCoherence>;
+template class Cache<CacheTags<SplitLIFO,LZSSCompression>, BlockingBuffer, UniCoherence>;
+template class Cache<CacheTags<SplitLIFO,LZSSCompression>, MissQueue, SimpleCoherence>;
+template class Cache<CacheTags<SplitLIFO,LZSSCompression>, MissQueue, UniCoherence>;
+#endif
+#endif
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
new file mode 100644 (file)
index 0000000..dcb22a9
--- /dev/null
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Dave Greene
+ *          Steve Reinhardt
+ */
+
+/**
+ * @file
+ * Describes a cache based on template policies.
+ */
+
+#ifndef __CACHE_HH__
+#define __CACHE_HH__
+
+#include "base/misc.hh" // fatal, panic, and warn
+#include "cpu/smt.hh" // SMT_MAX_THREADS
+
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/prefetch/prefetcher.hh"
+
+// forward declarations
+class Bus;
+class ExecContext;
+
+/**
+ * A template-policy based cache. The behavior of the cache can be altered by
+ * supplying different template policies. TagStore handles all tag and data
+ * storage @sa TagStore. Buffering handles all misses and writes/writebacks
+ * @sa MissQueue. Coherence handles all coherence policy details @sa
+ * UniCoherence, SimpleCoherence.
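+ *
+ * For example, the explicit instantiations in cache.cc build combinations
+ * such as (an illustrative subset):
+ *     Cache<CacheTags<LRU, NullCompression>, MissQueue, SimpleCoherence>
+ *     Cache<CacheTags<FALRU, NullCompression>, BlockingBuffer, UniCoherence>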
+ */
+template <class TagStore, class Buffering, class Coherence>
+class Cache : public BaseCache
+{
+  public:
+    /** Define the type of cache block to use. */
+    typedef typename TagStore::BlkType BlkType;
+
+    bool prefetchAccess;
+  protected:
+
+    /** Tag and data Storage */
+    TagStore *tags;
+    /** Miss and Writeback handler */
+    Buffering *missQueue;
+    /** Coherence protocol. */
+    Coherence *coherence;
+
+    /** Prefetcher */
+    Prefetcher<TagStore, Buffering> *prefetcher;
+
+    /** Do fast copies in this cache. */
+    bool doCopy;
+
+    /** Block on a delayed copy. */
+    bool blockOnCopy;
+
+    /**
+     * The clock ratio of the outgoing bus.
+     * Used for calculating critical word first.
+     */
+    int busRatio;
+
+     /**
+      * The bus width in bytes of the outgoing bus.
+      * Used for calculating critical word first.
+      */
+    int busWidth;
+
+     /**
+      * A permanent packet used to cause invalidations; it is appended to a
+      * target list whenever an invalidation is needed.
+      */
+    Packet * invalidatePkt;
+
+    /**
+     * Temporarily move a block into a MSHR.
+     * @todo Remove this when LSQ/SB are fixed and implemented in memtest.
+     */
+    void pseudoFill(Addr addr, int asid);
+
+    /**
+     * Temporarily move a block into an existing MSHR.
+     * @todo Remove this when LSQ/SB are fixed and implemented in memtest.
+     */
+    void pseudoFill(MSHR *mshr);
+
+  public:
+
+    class Params
+    {
+      public:
+        TagStore *tags;
+        Buffering *missQueue;
+        Coherence *coherence;
+        bool doCopy;
+        bool blockOnCopy;
+        BaseCache::Params baseParams;
+        Bus *in;
+        Bus *out;
+        Prefetcher<TagStore, Buffering> *prefetcher;
+        bool prefetchAccess;
+
+        Params(TagStore *_tags, Buffering *mq, Coherence *coh,
+               bool do_copy, BaseCache::Params params, Bus * in_bus,
+               Bus * out_bus, Prefetcher<TagStore, Buffering> *_prefetcher,
+               bool prefetch_access)
+            : tags(_tags), missQueue(mq), coherence(coh), doCopy(do_copy),
+              blockOnCopy(false), baseParams(params), in(in_bus), out(out_bus),
+              prefetcher(_prefetcher), prefetchAccess(prefetch_access)
+        {
+        }
+    };
+
+    /** Instantiates a basic cache object. */
+    Cache(const std::string &_name, HierParams *hier_params, Params &params);
+
+    void regStats();
+
+    /**
+     * Performs the access specified by the request.
+     * @param pkt The request to perform.
+     * @return The result of the access.
+     */
+    MemAccessResult access(Packet * &pkt);
+
+    /**
+     * Selects a request to send on the bus.
+     * @return The memory request to service.
+     */
+    Packet * getPacket();
+
+    /**
+     * Notification of whether the request was sent successfully.
+     * @param pkt The request.
+     * @param success True if the request was sent successfully.
+     */
+    void sendResult(Packet * &pkt, bool success);
+
+    /**
+     * Handles a response (cache line fill/write ack) from the bus.
+     * @param pkt The request being responded to.
+     */
+    void handleResponse(Packet * &pkt);
+
+    /**
+     * Start handling a copy transaction.
+     * @param pkt The copy request to perform.
+     */
+    void startCopy(Packet * &pkt);
+
+    /**
+     * Handle a delayed copy transaction.
+     * @param pkt The delayed copy request to continue.
+     * @param addr The address being responded to.
+     * @param blk The block of the current response.
+     * @param mshr The mshr being handled.
+     */
+    void handleCopy(Packet * &pkt, Addr addr, BlkType *blk, MSHR *mshr);
+
+    /**
+     * Selects a coherence message to forward to lower levels of the hierarchy.
+     * @return The coherence message to forward.
+     */
+    Packet * getCoherenceReq();
+
+    /**
+     * Snoops bus transactions to maintain coherence.
+     * @param pkt The current bus transaction.
+     */
+    void snoop(Packet * &pkt);
+
+    void snoopResponse(Packet * &pkt);
+
+    /**
+     * Invalidates the block containing address if found.
+     * @param addr The address to look for.
+     * @param asid The address space ID of the address.
+     * @todo Is this function necessary?
+     */
+    void invalidateBlk(Addr addr, int asid);
+
+    /**
+     * Squash all requests associated with the specified thread; intended
+     * for use by the I-cache.
+     * @param thread_number The thread to squash.
+     */
+    void squash(int thread_number)
+    {
+        missQueue->squash(thread_number);
+    }
+
+    /**
+     * Return the number of outstanding misses in a Cache.
+     * Default returns 0.
+     *
+     * @retval unsigned The number of misses still outstanding.
+     */
+    unsigned outstandingMisses() const
+    {
+        return missQueue->getMisses();
+    }
+
+    /**
+     * Send a response to the slave interface.
+     * @param pkt The request being responded to.
+     * @param time The time the response is ready.
+     */
+    void respond(Packet * &pkt, Tick time)
+    {
+        assert("Implement\n" && 0);
+//     si->respond(pkt,time);
+    }
+
+    /**
+     * Perform the access specified in the request and return the estimated
+     * time of completion. This function can either update the hierarchy state
+     * or just perform the access wherever the data is found depending on the
+     * state of the update flag.
+     * @param pkt The memory request to satisfy
+     * @param update If true, update the hierarchy, otherwise just perform the
+     * request.
+     * @return The estimated completion time.
+     */
+    Tick probe(Packet * &pkt, bool update);
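+    // e.g. probe(pkt, false) would satisfy the request wherever the data is
+    // found without updating hierarchy state, while probe(pkt, true) also
+    // updates that state (an illustrative reading of the update flag above).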
+
+    /**
+     * Snoop for the provided request in the cache and return the estimated
+     * time of completion.
+     * @todo Can a snoop probe not change state?
+     * @param pkt The memory request to satisfy
+     * @param update If true, update the hierarchy, otherwise just perform the
+     * request.
+     * @return The estimated completion time.
+     */
+    Tick snoopProbe(Packet * &pkt, bool update);
+};
+
+#endif // __CACHE_HH__
diff --git a/src/mem/cache/cache_blk.hh b/src/mem/cache/cache_blk.hh
new file mode 100644 (file)
index 0000000..cf1bd20
--- /dev/null
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/** @file
+ * Definitions of a simple cache block class.
+ */
+
+#ifndef __CACHE_BLK_HH__
+#define __CACHE_BLK_HH__
+
+#include "sim/root.hh"         // for Tick
+#include "arch/isa_traits.hh"  // for Addr
+#include "cpu/exec_context.hh"
+
+/**
+ * Cache block status bit assignments
+ */
+enum CacheBlkStatusBits {
+    /** valid, readable */
+    BlkValid =         0x01,
+    /** write permission */
+    BlkWritable =      0x02,
+    /** dirty (modified) */
+    BlkDirty =         0x04,
+    /** compressed */
+    BlkCompressed =    0x08,
+    /** block was referenced */
+    BlkReferenced =    0x10,
+    /** block was a hardware prefetch, not yet accessed */
+    BlkHWPrefetched =  0x20
+};
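+
+// Status bits compose with bitwise OR; for example a valid, writable, dirty
+// block (a sketch of the common "modified" state, not a constant defined
+// here) would carry:
+//     blk->status = BlkValid | BlkWritable | BlkDirty;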
+
+/**
+ * A Basic Cache block.
+ * Contains the tag, status, and a pointer to data.
+ */
+class CacheBlk
+{
+  public:
+    /** The address space ID of this block. */
+    int asid;
+    /** Data block tag value. */
+    Addr tag;
+    /**
+     * Contains a copy of the data in this block for easy access. This is used
+     * for efficient execution when the data could actually be stored in
+     * another format (COW, compressed, sub-blocked, etc). In all cases the
+     * data stored here should be kept consistent with the actual data
+     * referenced by this block.
+     */
+    uint8_t *data;
+    /** the number of bytes stored in this block. */
+    int size;
+
+    /** block state: OR of CacheBlkStatusBit */
+    typedef unsigned State;
+
+    /** The current status of this block. @sa CacheBlockStatusBits */
+    State status;
+
+    /** The curTick at which this block will be accessible. */
+    Tick whenReady;
+
+    /** Save the exec context so that writebacks can use them. */
+    ExecContext *xc;
+
+    /**
+     * The set this block belongs to.
+     * @todo Move this into subclasses when we fix CacheTags to use them.
+     */
+    int set;
+
+    /** Number of references to this block since it was brought in. */
+    int refCount;
+
+    CacheBlk()
+        : asid(-1), tag(0), data(0), size(0), status(0), whenReady(0), xc(0),
+          set(-1), refCount(0)
+    {}
+
+    /**
+     * Copy the state of the given block into this one.
+     * @param rhs The block to copy.
+     * @return a const reference to this block.
+     */
+    const CacheBlk& operator=(const CacheBlk& rhs)
+    {
+        asid = rhs.asid;
+        tag = rhs.tag;
+        data = rhs.data;
+        size = rhs.size;
+        status = rhs.status;
+        whenReady = rhs.whenReady;
+        xc = rhs.xc;
+        set = rhs.set;
+        refCount = rhs.refCount;
+        return *this;
+    }
+
+    /**
+     * Checks the write permissions of this block.
+     * @return True if the block is writable.
+     */
+    bool isWritable() const
+    {
+        const int needed_bits = BlkWritable | BlkValid;
+        return (status & needed_bits) == needed_bits;
+    }
+
+    /**
+     * Checks that a block is valid (readable).
+     * @return True if the block is valid.
+     */
+    bool isValid() const
+    {
+        return (status & BlkValid) != 0;
+    }
+
+    /**
+     * Check to see if a block has been written.
+     * @return True if the block is dirty.
+     */
+    bool isModified() const
+    {
+        return (status & BlkDirty) != 0;
+    }
+
+    /**
+     * Check to see if this block contains compressed data.
+     * @return True if the block's data is compressed.
+     */
+    bool isCompressed() const
+    {
+        return (status & BlkCompressed) != 0;
+    }
+
+    /**
+     * Check if this block has been referenced.
+     * @return True if the block has been referenced.
+     */
+    bool isReferenced() const
+    {
+        return (status & BlkReferenced) != 0;
+    }
+
+    /**
+     * Check if this block was the result of a hardware prefetch, yet to
+     * be touched.
+     * @return True if the block was a hardware prefetch, still unaccessed.
+     */
+    bool isPrefetch() const
+    {
+        return (status & BlkHWPrefetched) != 0;
+    }
+
+
+};
+
+/**
+ * Output a CacheBlk to the given ostream.
+ * @param out The stream for the output.
+ * @param blk The cache block to print.
+ *
+ * @return The output stream.
+ */
+inline std::ostream &
+operator<<(std::ostream &out, const CacheBlk &blk)
+{
+    out << std::hex << std::endl;
+    out << "  Tag: " << blk.tag << std::endl;
+    out << "  Status: " <<  blk.status << std::endl;
+
+    return(out << std::dec);
+}
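+
+// Illustrative use: a debugging dump can stream a block directly, e.g.
+// "std::cout << blk;" prints the block's tag and status in hex.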
+
+#endif //__CACHE_BLK_HH__
diff --git a/src/mem/cache/cache_builder.cc b/src/mem/cache/cache_builder.cc
new file mode 100644 (file)
index 0000000..e3efb9b
--- /dev/null
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Nathan Binkert
+ */
+
+/**
+ * @file
+ * SimObject instantiation of caches.
+ */
+#include <vector>
+
+// Must be included first to determine which caches we want
+#include "mem/config/cache.hh"
+#include "mem/config/compression.hh"
+#include "mem/config/prefetch.hh"
+
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/cache.hh"
+#include "mem/bus/bus.hh"
+#include "mem/cache/coherence/coherence_protocol.hh"
+#include "sim/builder.hh"
+
+// Tag Templates
+#if defined(USE_CACHE_LRU)
+#include "mem/cache/tags/lru.hh"
+#endif
+
+#if defined(USE_CACHE_FALRU)
+#include "mem/cache/tags/fa_lru.hh"
+#endif
+
+#if defined(USE_CACHE_IIC)
+#include "mem/cache/tags/iic.hh"
+#endif
+
+#if defined(USE_CACHE_SPLIT)
+#include "mem/cache/tags/split.hh"
+#endif
+
+#if defined(USE_CACHE_SPLIT_LIFO)
+#include "mem/cache/tags/split_lifo.hh"
+#endif
+
+// Compression Templates
+#include "base/compression/null_compression.hh"
+#if defined(USE_LZSS_COMPRESSION)
+#include "base/compression/lzss_compression.hh"
+#endif
+
+// CacheTags Templates
+#include "mem/cache/tags/cache_tags.hh"
+
+// MissQueue Templates
+#include "mem/cache/miss/miss_queue.hh"
+#include "mem/cache/miss/blocking_buffer.hh"
+
+// Coherence Templates
+#include "mem/cache/coherence/uni_coherence.hh"
+#include "mem/cache/coherence/simple_coherence.hh"
+
+// Bus Interfaces
+#include "mem/bus/slave_interface.hh"
+#include "mem/bus/master_interface.hh"
+#include "mem/memory_interface.hh"
+
+#include "mem/trace/mem_trace_writer.hh"
+
+//Prefetcher Headers
+#if defined(USE_GHB)
+#include "mem/cache/prefetch/ghb_prefetcher.hh"
+#endif
+#if defined(USE_TAGGED)
+#include "mem/cache/prefetch/tagged_prefetcher.hh"
+#endif
+#if defined(USE_STRIDED)
+#include "mem/cache/prefetch/stride_prefetcher.hh"
+#endif
+
+
+using namespace std;
+using namespace TheISA;
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
+
+    Param<int> size;
+    Param<int> assoc;
+    Param<int> block_size;
+    Param<int> latency;
+    Param<int> mshrs;
+    Param<int> tgts_per_mshr;
+    Param<int> write_buffers;
+    Param<bool> prioritizeRequests;
+    SimObjectParam<Bus *> in_bus;
+    SimObjectParam<Bus *> out_bus;
+    Param<bool> do_copy;
+    SimObjectParam<CoherenceProtocol *> protocol;
+    Param<Addr> trace_addr;
+    Param<int> hash_delay;
+#if defined(USE_CACHE_IIC)
+    SimObjectParam<Repl *> repl;
+#endif
+    Param<bool> compressed_bus;
+    Param<bool> store_compressed;
+    Param<bool> adaptive_compression;
+    Param<int> compression_latency;
+    Param<int> subblock_size;
+    Param<Counter> max_miss_count;
+    SimObjectParam<HierParams *> hier;
+    VectorParam<Range<Addr> > addr_range;
+    SimObjectParam<MemTraceWriter *> mem_trace;
+    Param<bool> split;
+    Param<int> split_size;
+    Param<bool> lifo;
+    Param<bool> two_queue;
+    Param<bool> prefetch_miss;
+    Param<bool> prefetch_access;
+    Param<int> prefetcher_size;
+    Param<bool> prefetch_past_page;
+    Param<bool> prefetch_serial_squash;
+    Param<Tick> prefetch_latency;
+    Param<int> prefetch_degree;
+    Param<string> prefetch_policy;
+    Param<bool> prefetch_cache_check_push;
+    Param<bool> prefetch_use_cpu_id;
+    Param<bool> prefetch_data_accesses_only;
+
+END_DECLARE_SIM_OBJECT_PARAMS(BaseCache)
+
+
+BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache)
+
+    INIT_PARAM(size, "capacity in bytes"),
+    INIT_PARAM(assoc, "associativity"),
+    INIT_PARAM(block_size, "block size in bytes"),
+    INIT_PARAM(latency, "hit latency in CPU cycles"),
+    INIT_PARAM(mshrs, "number of MSHRs (max outstanding requests)"),
+    INIT_PARAM(tgts_per_mshr, "max number of accesses per MSHR"),
+    INIT_PARAM_DFLT(write_buffers, "number of write buffers", 8),
+    INIT_PARAM_DFLT(prioritizeRequests, "always service demand misses first",
+                    false),
+    INIT_PARAM_DFLT(in_bus, "incoming bus object", NULL),
+    INIT_PARAM(out_bus, "outgoing bus object"),
+    INIT_PARAM_DFLT(do_copy, "perform fast copies in the cache", false),
+    INIT_PARAM_DFLT(protocol, "coherence protocol to use in the cache", NULL),
+    INIT_PARAM_DFLT(trace_addr, "address to trace", 0),
+
+    INIT_PARAM_DFLT(hash_delay, "time in cycles of hash access", 1),
+#if defined(USE_CACHE_IIC)
+    INIT_PARAM_DFLT(repl, "replacement policy", NULL),
+#endif
+    INIT_PARAM_DFLT(compressed_bus,
+                    "This cache connects to a compressed memory",
+                    false),
+    INIT_PARAM_DFLT(store_compressed, "Store compressed data in the cache",
+                    false),
+    INIT_PARAM_DFLT(adaptive_compression, "Use an adaptive compression scheme",
+                    false),
+    INIT_PARAM_DFLT(compression_latency,
+                    "Latency in cycles of compression algorithm",
+                    0),
+    INIT_PARAM_DFLT(subblock_size,
+                    "Size of subblock in IIC used for compression",
+                    0),
+    INIT_PARAM_DFLT(max_miss_count,
+                    "The number of misses to handle before calling exit",
+                    0),
+    INIT_PARAM_DFLT(hier,
+                    "Hierarchy global variables",
+                    &defaultHierParams),
+    INIT_PARAM_DFLT(addr_range, "The address range in bytes",
+                    vector<Range<Addr> >(1,RangeIn((Addr)0, MaxAddr))),
+    INIT_PARAM_DFLT(mem_trace, "Memory trace to write accesses to", NULL),
+    INIT_PARAM_DFLT(split, "Whether this is a partitioned cache", false),
+    INIT_PARAM_DFLT(split_size, "the number of \"ways\" belonging to the LRU partition", 0),
+    INIT_PARAM_DFLT(lifo, "whether you are using a LIFO repl. policy", false),
+    INIT_PARAM_DFLT(two_queue, "whether the lifo should have two queue replacement", false),
+    INIT_PARAM_DFLT(prefetch_miss, "whether you are using the hardware prefetcher from Miss stream", false),
+    INIT_PARAM_DFLT(prefetch_access, "whether you are using the hardware prefetcher from Access stream", false),
+    INIT_PARAM_DFLT(prefetcher_size, "Number of entries in the hardware prefetch queue", 100),
+    INIT_PARAM_DFLT(prefetch_past_page, "Allow prefetches to cross virtual page boundaries", false),
+    INIT_PARAM_DFLT(prefetch_serial_squash, "Squash prefetches with a later time on a subsequent miss", false),
+    INIT_PARAM_DFLT(prefetch_latency, "Latency of the prefetcher", 10),
+    INIT_PARAM_DFLT(prefetch_degree, "Degree of the prefetch depth", 1),
+    INIT_PARAM_DFLT(prefetch_policy, "Type of prefetcher to use", "none"),
+    INIT_PARAM_DFLT(prefetch_cache_check_push, "Check if in cache on push or pop of prefetch queue", true),
+    INIT_PARAM_DFLT(prefetch_use_cpu_id, "Use the CPU ID to separate calculations of prefetches", true),
+    INIT_PARAM_DFLT(prefetch_data_accesses_only, "Only prefetch on data not on instruction accesses", false)
+END_INIT_SIM_OBJECT_PARAMS(BaseCache)
+
+
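+// BUILD_CACHE instantiates a Cache parameterized by tag store (t),
+// compression scheme (comp), miss buffering (b), and coherence (c).  It
+// selects the prefetcher named by pf_policy, constructs the cache, attaches
+// the CPU-side (slave) and memory-side (master) bus interfaces, and returns
+// the new object.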
+#define BUILD_CACHE(t, comp, b, c) do {                                        \
+        Prefetcher<CacheTags<t, comp>, b> *pf; \
+        if (pf_policy == "tagged") {      \
+             BUILD_TAGGED_PREFETCHER(t, comp, b); \
+        }            \
+        else if (pf_policy == "stride") {       \
+             BUILD_STRIDED_PREFETCHER(t, comp, b); \
+        } \
+        else if (pf_policy == "ghb") {       \
+             BUILD_GHB_PREFETCHER(t, comp, b); \
+        } \
+        else { \
+             BUILD_NULL_PREFETCHER(t, comp, b); \
+        } \
+        Cache<CacheTags<t, comp>, b, c>::Params params(tagStore, mq, coh, \
+                                                       do_copy, base_params, \
+                                                       in_bus, out_bus, pf,  \
+                                                       prefetch_access); \
+        Cache<CacheTags<t, comp>, b, c> *retval =                      \
+            new Cache<CacheTags<t, comp>, b, c>(getInstanceName(), hier, \
+                                                params);               \
+        if (in_bus == NULL) {                                          \
+            retval->setSlaveInterface(new MemoryInterface<Cache<CacheTags<t, comp>, b, c> >(getInstanceName(), hier, retval, mem_trace)); \
+        } else {                                                       \
+            retval->setSlaveInterface(new SlaveInterface<Cache<CacheTags<t, comp>, b, c>, Bus>(getInstanceName(), hier, retval, in_bus, mem_trace)); \
+        }                                                              \
+        retval->setMasterInterface(new MasterInterface<Cache<CacheTags<t, comp>, b, c>, Bus>(getInstanceName(), hier, retval, out_bus)); \
+        out_bus->rangeChange();                                                \
+        return retval;                                                 \
+    } while (0)
+
+#define BUILD_CACHE_PANIC(x) do {                      \
+        panic("%s not compiled into M5", x);           \
+    } while (0)
+
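+// BUILD_COMPRESSED_CACHE wraps the chosen tag store in a CacheTags object,
+// using LZSS compression when a compressed bus or compressed store is
+// requested (and LZSS support is compiled in), and NullCompression otherwise.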
+#if defined(USE_LZSS_COMPRESSION)
+#define BUILD_COMPRESSED_CACHE(TAGS, tags, b, c) do { \
+        if (compressed_bus || store_compressed){                       \
+            CacheTags<TAGS, LZSSCompression> *tagStore =               \
+                new CacheTags<TAGS, LZSSCompression>(tags,             \
+                                                     compression_latency, \
+                                                     true, store_compressed, \
+                                                     adaptive_compression,   \
+                                                     prefetch_miss); \
+            BUILD_CACHE(TAGS, LZSSCompression, b, c);                  \
+        } else {                                                       \
+            CacheTags<TAGS, NullCompression> *tagStore =               \
+                new CacheTags<TAGS, NullCompression>(tags,             \
+                                                     compression_latency, \
+                                                     true, store_compressed, \
+                                                     adaptive_compression,   \
+                                                     prefetch_miss); \
+            BUILD_CACHE(TAGS, NullCompression, b, c);                  \
+        }                                                              \
+    } while (0)
+#else
+#define BUILD_COMPRESSED_CACHE(TAGS, tags, b, c) do { \
+        if (compressed_bus || store_compressed){                       \
+            BUILD_CACHE_PANIC("compressed caches");                    \
+        } else {                                                       \
+            CacheTags<TAGS, NullCompression> *tagStore =               \
+                new CacheTags<TAGS, NullCompression>(tags,             \
+                                                      compression_latency, \
+                                                      true, store_compressed, \
+                                                      adaptive_compression, \
+                                                      prefetch_miss); \
+            BUILD_CACHE(TAGS, NullCompression, b, c);                  \
+        }                                                              \
+    } while (0)
+#endif
+
+#if defined(USE_CACHE_FALRU)
+#define BUILD_FALRU_CACHE(b,c) do {                        \
+        FALRU *tags = new FALRU(block_size, size, latency); \
+        BUILD_COMPRESSED_CACHE(FALRU, tags, b, c);             \
+    } while (0)
+#else
+#define BUILD_FALRU_CACHE(b, c) BUILD_CACHE_PANIC("falru cache")
+#endif
+
+#if defined(USE_CACHE_LRU)
+#define BUILD_LRU_CACHE(b, c) do {                             \
+        LRU *tags = new LRU(numSets, block_size, assoc, latency);      \
+        BUILD_COMPRESSED_CACHE(LRU, tags, b, c);                       \
+    } while (0)
+#else
+#define BUILD_LRU_CACHE(b, c) BUILD_CACHE_PANIC("lru cache")
+#endif
+
+#if defined(USE_CACHE_SPLIT)
+#define BUILD_SPLIT_CACHE(b, c) do {                                   \
+        Split *tags = new Split(numSets, block_size, assoc, split_size, lifo, \
+                                two_queue, latency);           \
+        BUILD_COMPRESSED_CACHE(Split, tags, b, c);                     \
+    } while (0)
+#else
+#define BUILD_SPLIT_CACHE(b, c) BUILD_CACHE_PANIC("split cache")
+#endif
+
+#if defined(USE_CACHE_SPLIT_LIFO)
+#define BUILD_SPLIT_LIFO_CACHE(b, c) do {                              \
+        SplitLIFO *tags = new SplitLIFO(block_size, size, assoc,        \
+                                        latency, two_queue, -1);       \
+        BUILD_COMPRESSED_CACHE(SplitLIFO, tags, b, c);                 \
+    } while (0)
+#else
+#define BUILD_SPLIT_LIFO_CACHE(b, c) BUILD_CACHE_PANIC("lifo cache")
+#endif
+
+#if defined(USE_CACHE_IIC)
+#define BUILD_IIC_CACHE(b ,c) do {                     \
+        IIC *tags = new IIC(iic_params);               \
+        BUILD_COMPRESSED_CACHE(IIC, tags, b, c);       \
+    } while (0)
+#else
+#define BUILD_IIC_CACHE(b, c) BUILD_CACHE_PANIC("iic")
+#endif
+
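+// BUILD_CACHES selects the tag store: IIC when a replacement policy object
+// is supplied, fully-associative LRU when there is a single set, and the
+// split, split-LIFO, or set-associative LRU tags otherwise.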
+#define BUILD_CACHES(b, c) do {                                \
+        if (repl == NULL) {                            \
+            if (numSets == 1) {                                \
+                BUILD_FALRU_CACHE(b, c);               \
+            } else {                                   \
+                if (split == true) {                   \
+                    BUILD_SPLIT_CACHE(b, c);           \
+                } else if (lifo == true) {             \
+                    BUILD_SPLIT_LIFO_CACHE(b, c);      \
+                } else {                               \
+                    BUILD_LRU_CACHE(b, c);             \
+                }                                      \
+            }                                          \
+        } else {                                       \
+            BUILD_IIC_CACHE(b, c);                     \
+        }                                              \
+    } while (0)
+
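+// BUILD_COHERENCE selects the coherence scheme: UniCoherence when no
+// protocol object is given, SimpleCoherence (driven by the supplied
+// protocol) otherwise.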
+#define BUILD_COHERENCE(b) do {                                                \
+        if (protocol == NULL) {                                                \
+            UniCoherence *coh = new UniCoherence();                    \
+            BUILD_CACHES(b, UniCoherence);                             \
+        } else {                                                       \
+            SimpleCoherence *coh = new SimpleCoherence(protocol);      \
+            BUILD_CACHES(b, SimpleCoherence);                          \
+        }                                                              \
+    } while (0)
+
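+// Each BUILD_*_PREFETCHER macro allocates the requested prefetcher, or
+// panics if that prefetcher was not compiled in.  The "null" prefetcher
+// reuses the tagged prefetcher; with pf_policy "none" neither prefetch
+// stream may be enabled, so it stays idle.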
+#if defined(USE_TAGGED)
+#define BUILD_TAGGED_PREFETCHER(t, comp, b) pf = new   \
+                TaggedPrefetcher<CacheTags<t, comp>, b>(prefetcher_size, \
+                                                        !prefetch_past_page, \
+                                                        prefetch_serial_squash, \
+                                                        prefetch_cache_check_push, \
+                                                        prefetch_data_accesses_only, \
+                                                        prefetch_latency, \
+                                                        prefetch_degree)
+#else
+#define BUILD_TAGGED_PREFETCHER(t, comp, b) BUILD_CACHE_PANIC("Tagged Prefetcher")
+#endif
+
+#if defined(USE_STRIDED)
+#define BUILD_STRIDED_PREFETCHER(t, comp, b) pf = new  \
+                StridePrefetcher<CacheTags<t, comp>, b>(prefetcher_size, \
+                                                        !prefetch_past_page, \
+                                                        prefetch_serial_squash, \
+                                                        prefetch_cache_check_push, \
+                                                        prefetch_data_accesses_only, \
+                                                        prefetch_latency, \
+                                                        prefetch_degree, \
+                                                        prefetch_use_cpu_id)
+#else
+#define BUILD_STRIDED_PREFETCHER(t, comp, b) BUILD_CACHE_PANIC("Stride Prefetcher")
+#endif
+
+#if defined(USE_GHB)
+#define BUILD_GHB_PREFETCHER(t, comp, b) pf = new  \
+                GHBPrefetcher<CacheTags<t, comp>, b>(prefetcher_size, \
+                                                     !prefetch_past_page, \
+                                                     prefetch_serial_squash, \
+                                                     prefetch_cache_check_push, \
+                                                        prefetch_data_accesses_only, \
+                                                     prefetch_latency, \
+                                                     prefetch_degree, \
+                                                     prefetch_use_cpu_id)
+#else
+#define BUILD_GHB_PREFETCHER(t, comp, b) BUILD_CACHE_PANIC("GHB Prefetcher")
+#endif
+
+#if defined(USE_TAGGED)
+#define BUILD_NULL_PREFETCHER(t, comp, b) pf = new  \
+                TaggedPrefetcher<CacheTags<t, comp>, b>(prefetcher_size, \
+                                                        !prefetch_past_page, \
+                                                        prefetch_serial_squash, \
+                                                        prefetch_cache_check_push, \
+                                                        prefetch_data_accesses_only, \
+                                                        prefetch_latency, \
+                                                        prefetch_degree)
+#else
+#define BUILD_NULL_PREFETCHER(t, comp, b) BUILD_CACHE_PANIC("NULL Prefetcher (uses Tagged)")
+#endif
+
+CREATE_SIM_OBJECT(BaseCache)
+{
+    string name = getInstanceName();
+    int numSets = size / (assoc * block_size);
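+    // e.g., a 32kB, 4-way cache with 64-byte blocks has
+    // 32768 / (4 * 64) = 128 sets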
+    string pf_policy = prefetch_policy;
+    if (subblock_size == 0) {
+        subblock_size = block_size;
+    }
+
+    // Build BaseCache param object
+    BaseCache::Params base_params(addr_range, latency,
+                                  block_size, max_miss_count);
+
+    //Warnings about prefetcher policy
+    if (pf_policy == "none" && (prefetch_miss || prefetch_access)) {
+        panic("With no prefetcher, you shouldn't prefetch from"
+              " either miss or access stream\n");
+    }
+    if ((pf_policy == "tagged" || pf_policy == "stride" ||
+         pf_policy == "ghb") && !(prefetch_miss || prefetch_access)) {
+        warn("With this prefetcher you should chose a prefetch"
+             " stream (miss or access)\nNo Prefetching will occur\n");
+    }
+    if ((pf_policy == "tagged" || pf_policy == "stride" ||
+         pf_policy == "ghb") && prefetch_miss && prefetch_access) {
+        panic("Can't do prefetches from both miss and access"
+              " stream\n");
+    }
+    if (pf_policy != "tagged" && pf_policy != "stride" &&
+        pf_policy != "ghb"    && pf_policy != "none") {
+        panic("Unrecognized form of a prefetcher: %s, try using"
+              "['none','stride','tagged','ghb']\n", pf_policy);
+    }
+
+#if defined(USE_CACHE_IIC)
+    // Build IIC params
+    IIC::Params iic_params;
+    iic_params.size = size;
+    iic_params.numSets = numSets;
+    iic_params.blkSize = block_size;
+    iic_params.assoc = assoc;
+    iic_params.hashDelay = hash_delay;
+    iic_params.hitLatency = latency;
+    iic_params.rp = repl;
+    iic_params.subblockSize = subblock_size;
+#else
+    const void *repl = NULL;
+#endif
+
+    if (mshrs == 1 || out_bus->doEvents() == false) {
+        BlockingBuffer *mq = new BlockingBuffer(true);
+        BUILD_COHERENCE(BlockingBuffer);
+    } else {
+        MissQueue *mq = new MissQueue(mshrs, tgts_per_mshr, write_buffers,
+                                      true, prefetch_miss);
+        BUILD_COHERENCE(MissQueue);
+    }
+    return NULL;
+}
+
+REGISTER_SIM_OBJECT("BaseCache", BaseCache)
+
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
new file mode 100644 (file)
index 0000000..3dd8d74
--- /dev/null
@@ -0,0 +1,661 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Dave Greene
+ *          Nathan Binkert
+ */
+
+/**
+ * @file
+ * Cache definitions.
+ */
+
+#include <cassert>
+#include <cmath>
+#include <iostream>
+#include <string>
+
+#include "sim/host.hh"
+#include "base/misc.hh"
+#include "cpu/smt.hh"
+
+#include "mem/cache/cache.hh"
+#include "mem/cache/cache_blk.hh"
+#include "mem/cache/miss/mshr.hh"
+#include "mem/cache/prefetch/prefetcher.hh"
+
+#include "mem/bus/bus.hh"
+
+#include "mem/bus/slave_interface.hh"
+#include "mem/memory_interface.hh"
+#include "mem/bus/master_interface.hh"
+
+#include "mem/mem_debug.hh"
+
+#include "sim/sim_events.hh" // for SimExitEvent
+
+using namespace std;
+
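+// The three entry points below dispatch CPU-side packets to access() or
+// probe(), and memory-side packets to handleResponse() (for responses) or
+// the snoop path, for timing, atomic, and functional accesses respectively.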
+template<class TagStore, class Buffering, class Coherence>
+bool
+Cache<TagStore,Buffering,Coherence>::
+doTimingAccess(Packet *pkt, MemoryPort *memoryPort, bool isCpuSide)
+{
+    if (isCpuSide)
+    {
+        access(pkt);
+    }
+    else
+    {
+        if (pkt->isResponse())
+            handleResponse(pkt);
+        else
+            snoop(pkt);
+    }
+    return true;  // declared bool; assume a timing access is always accepted
+}
+
+template<class TagStore, class Buffering, class Coherence>
+Tick
+Cache<TagStore,Buffering,Coherence>::
+doAtomicAccess(Packet *pkt, MemoryPort *memoryPort, bool isCpuSide)
+{
+    if (isCpuSide)
+    {
+        return probe(pkt, true);
+    }
+    else
+    {
+        if (pkt->isResponse()) {
+            handleResponse(pkt);
+            return 0;  // declared Tick; a response has no latency to report
+        }
+        return snoopProbe(pkt, true);
+    }
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::
+doFunctionalAccess(Packet *pkt, MemoryPort *memoryPort, bool isCpuSide)
+{
+    if (isCpuSide)
+    {
+        probe(pkt, false);
+    }
+    else
+    {
+        if (pkt->isResponse())
+            handleResponse(pkt);
+        else
+            snoopProbe(pkt, false);
+    }
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::
+recvStatusChange(Port::Status status, bool isCpuSide)
+{
+
+}
+
+
+template<class TagStore, class Buffering, class Coherence>
+Cache<TagStore,Buffering,Coherence>::
+Cache(const std::string &_name, HierParams *hier_params,
+      Cache<TagStore,Buffering,Coherence>::Params &params)
+    : BaseCache(_name, hier_params, params.baseParams),
+      prefetchAccess(params.prefetchAccess),
+      tags(params.tags), missQueue(params.missQueue),
+      coherence(params.coherence), prefetcher(params.prefetcher),
+      doCopy(params.doCopy), blockOnCopy(params.blockOnCopy)
+{
+    if (params.in == NULL) {
+        topLevelCache = true;
+    }
+    tags->setCache(this, params.out->width, params.out->clockRate);
+    tags->setPrefetcher(prefetcher);
+    missQueue->setCache(this);
+    missQueue->setPrefetcher(prefetcher);
+    coherence->setCache(this);
+    prefetcher->setCache(this);
+    prefetcher->setTags(tags);
+    prefetcher->setBuffer(missQueue);
+    invalidatePkt = new Packet;
+    invalidatePkt->cmd = Invalidate;
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::regStats()
+{
+    BaseCache::regStats();
+    tags->regStats(name());
+    missQueue->regStats(name());
+    coherence->regStats(name());
+    prefetcher->regStats(name());
+}
+
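+// access() handles a CPU-side request in timing mode: it optionally feeds
+// the access stream to the prefetcher, satisfies bare invalidates/upgrades
+// immediately, probes the tags, performs a fast allocation for block-sized
+// writes when the coherence protocol allows it, and either responds on a
+// hit or hands the request to the miss queue.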
+template<class TagStore, class Buffering, class Coherence>
+MemAccessResult
+Cache<TagStore,Buffering,Coherence>::access(Packet * &pkt)
+{
+    MemDebug::cacheAccess(pkt);
+    BlkType *blk = NULL;
+    PacketList writebacks;
+    int size = blkSize;
+    int lat = hitLatency;
+    if (prefetchAccess) {
+        //We are determining prefetches on access stream, call prefetcher
+        prefetcher->handleMiss(pkt, curTick);
+    }
+    if (!pkt->isUncacheable()) {
+        if (pkt->cmd.isInvalidate() && !pkt->cmd.isRead()
+            && !pkt->cmd.isWrite()) {
+            //Upgrade or Invalidate
+            //Look into what happens if two slave caches on bus
+            DPRINTF(Cache, "%s %d %x ? blk_addr: %x\n", pkt->cmd.toString(),
+                    pkt->req->asid, pkt->paddr & (((ULL(1))<<48)-1),
+                    pkt->paddr & ~((Addr)blkSize - 1));
+
+            //@todo Should this return latency have the hit latency in it?
+//         respond(pkt,curTick+lat);
+            pkt->flags |= SATISFIED;
+            return MA_HIT;
+        }
+        blk = tags->handleAccess(pkt, lat, writebacks);
+    } else {
+        size = pkt->size;
+    }
+    // If this is a block size write/hint (WH64) allocate the block here
+    // if the coherence protocol allows it.
+    /** @todo make the fast write alloc (wh64) work with coherence. */
+    /** @todo Do we want to do fast writes for writebacks as well? */
+    if (!blk && pkt->size >= blkSize && coherence->allowFastWrites() &&
+        (pkt->cmd == Write || pkt->cmd == WriteInvalidate) ) {
+        // not outstanding misses, can do this
+        MSHR* outstanding_miss = missQueue->findMSHR(pkt->paddr, pkt->req->asid);
+        if (pkt->cmd == WriteInvalidate || !outstanding_miss) {
+            if (outstanding_miss) {
+                warn("WriteInv doing a fastallocate"
+                     "with an outstanding miss to the same address\n");
+            }
+            blk = tags->handleFill(NULL, pkt, BlkValid | BlkWritable,
+                                   writebacks);
+            ++fastWrites;
+        }
+    }
+    while (!writebacks.empty()) {
+        missQueue->doWriteback(writebacks.front());
+        writebacks.pop_front();
+    }
+    DPRINTF(Cache, "%s %d %x %s blk_addr: %x pc %x\n", pkt->cmd.toString(),
+            pkt->req->asid, pkt->paddr & (((ULL(1))<<48)-1), (blk) ? "hit" : "miss",
+            pkt->paddr & ~((Addr)blkSize - 1), pkt->pc);
+    if (blk) {
+        // Hit
+        hits[pkt->cmd.toIndex()][pkt->thread_num]++;
+        // clear dirty bit if write through
+        if (!pkt->cmd.isNoResponse())
+            respond(pkt, curTick+lat);
+        return MA_HIT;
+    }
+
+    // Miss
+    if (!pkt->isUncacheable()) {
+        misses[pkt->cmd.toIndex()][pkt->thread_num]++;
+        /** @todo Move miss count code into BaseCache */
+        if (missCount) {
+            --missCount;
+            if (missCount == 0)
+                new SimExitEvent("A cache reached the maximum miss count");
+        }
+    }
+    missQueue->handleMiss(pkt, size, curTick + hitLatency);
+    return MA_CACHE_MISS;
+}
+
+
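+// getPacket() pulls the next miss from the miss queue and, for cacheable
+// requests, asks the coherence protocol which bus command to issue given
+// the current state of the block.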
+template<class TagStore, class Buffering, class Coherence>
+Packet *
+Cache<TagStore,Buffering,Coherence>::getPacket()
+{
+    Packet * pkt = missQueue->getPacket();
+    if (pkt) {
+        if (!pkt->isUncacheable()) {
+            if (pkt->cmd == Hard_Prefetch)
+                misses[Hard_Prefetch][pkt->thread_num]++;
+            BlkType *blk = tags->findBlock(pkt);
+            Packet::Command cmd = coherence->getBusCmd(pkt->cmd,
+                                              (blk)? blk->status : 0);
+            missQueue->setBusCmd(pkt, cmd);
+        }
+    }
+
+    assert(!doMasterRequest() || missQueue->havePending());
+    assert(!pkt || pkt->time <= curTick);
+    return pkt;
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::sendResult(Packet * &pkt, bool success)
+{
+    if (success) {
+        missQueue->markInService(pkt);
+        //Temp Hack for UPGRADES
+        if (pkt->cmd == Upgrade) {
+            handleResponse(pkt);
+        }
+    } else if (pkt && !pkt->isUncacheable()) {
+        missQueue->restoreOrigCmd(pkt);
+    }
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::handleResponse(Packet * &pkt)
+{
+    BlkType *blk = NULL;
+    if (pkt->senderState) {
+        MemDebug::cacheResponse(pkt);
+        DPRINTF(Cache, "Handling reponse to %x, blk addr: %x\n",pkt->paddr,
+                pkt->paddr & (((ULL(1))<<48)-1));
+
+        if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
+            blk = tags->findBlock(pkt);
+            CacheBlk::State old_state = (blk) ? blk->status : 0;
+            PacketList writebacks;
+            blk = tags->handleFill(blk, pkt->senderState,
+                                   coherence->getNewState(pkt,old_state),
+                                   writebacks);
+            while (!writebacks.empty()) {
+                missQueue->doWriteback(writebacks.front());
+                writebacks.pop_front();  // pop, or the same writeback is issued forever
+            }
+        }
+        missQueue->handleResponse(pkt, curTick + hitLatency);
+    }
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::pseudoFill(Addr addr, int asid)
+{
+    // Need to temporarily move this blk into MSHRs
+    MSHR *mshr = missQueue->allocateTargetList(addr, asid);
+    int lat;
+    PacketList dummy;
+    // Read the data into the mshr
+    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
+    assert(dummy.empty());
+    assert(mshr->pkt->isSatisfied());
+    // can overload order since it isn't used on non pending blocks
+    mshr->order = blk->status;
+    // temporarily remove the block from the cache.
+    tags->invalidateBlk(addr, asid);
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::pseudoFill(MSHR *mshr)
+{
+    // Need to temporarily move this blk into MSHRs
+    assert(mshr->pkt->cmd == Read);
+    int lat;
+    PacketList dummy;
+    // Read the data into the mshr
+    BlkType *blk = tags->handleAccess(mshr->pkt, lat, dummy, false);
+    assert(dummy.empty());
+    assert(mshr->pkt->isSatisfied());
+    // can overload order since it isn't used on non pending blocks
+    mshr->order = blk->status;
+    // temporarily remove the block from the cache.
+    tags->invalidateBlk(mshr->pkt->paddr, mshr->pkt->req->asid);
+}
+
+
+template<class TagStore, class Buffering, class Coherence>
+Packet *
+Cache<TagStore,Buffering,Coherence>::getCoherenceReq()
+{
+    return coherence->getPacket();
+}
+
+
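+// snoop() services a bus request from another cache: an in-service MSHR
+// may force a NACK or a queued self-invalidate, a pending writeback can
+// supply the data directly, and otherwise the coherence protocol decides
+// the new block state and whether this cache supplies data.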
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::snoop(Packet * &pkt)
+{
+    Addr blk_addr = pkt->paddr & ~(Addr(blkSize-1));
+    BlkType *blk = tags->findBlock(pkt);
+    MSHR *mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
+    if (isTopLevel() && coherence->hasProtocol()) { //@todo Move this into handle bus req
+        //If we find an mshr, and it is in service, we need to NACK or invalidate
+        if (mshr) {
+            if (mshr->inService) {
+                if ((mshr->pkt->cmd.isInvalidate() || !mshr->pkt->isCacheFill())
+                    && (pkt->cmd != Invalidate && pkt->cmd != WriteInvalidate)) {
+                    //If the outstanding request was an invalidate (upgrade,readex,..)
+                    //Then we need to NACK the request until we get the data
+                    //Also NACK if the outstanding request is not a cache fill (writeback)
+                    pkt->flags |= NACKED_LINE;
+                    return;
+                }
+                else {
+                    //The supplier will be someone else, because we are waiting for
+                    //the data.  This should cause this cache to be forced to go to
+                    //the shared state, not the exclusive even though the shared line
+                    //won't be asserted.  But for now we will just invalidate ourselves
+                    //and allow the other cache to go into the exclusive state.
+                    //@todo Make it so a read to a pending read doesn't invalidate.
+                    //@todo Make it so that a read to a pending read can't be exclusive now.
+
+                    //Set the address so find match works
+                    invalidatePkt->paddr = pkt->paddr;
+
+                    //Append the invalidate on
+                    missQueue->addTarget(mshr,invalidatePkt);
+                    DPRINTF(Cache, "Appending Invalidate to blk_addr: %x\n", pkt->paddr & (((ULL(1))<<48)-1));
+                    return;
+                }
+            }
+        }
+        //We also need to check the writeback buffers and handle those
+        std::vector<MSHR *> writebacks;
+        if (missQueue->findWrites(blk_addr, pkt->req->asid, writebacks)) {
+            DPRINTF(Cache, "Snoop hit in writeback to blk_addr: %x\n", pkt->paddr & (((ULL(1))<<48)-1));
+
+            //Look through the writebacks for any non-uncacheable writes and use the first one
+            for (int i=0; i<writebacks.size(); i++) {
+                mshr = writebacks[i];
+
+                if (!mshr->pkt->isUncacheable()) {
+                    if (pkt->cmd.isRead()) {
+                        //Only Upgrades don't get here
+                        //Supply the data
+                        pkt->flags |= SATISFIED;
+
+                        //If we are in an exclusive protocol, make it ask again
+                        //to get write permissions (upgrade), signal shared
+                        pkt->flags |= SHARED_LINE;
+
+                        if (doData()) {
+                            assert(pkt->cmd.isRead());
+
+                            assert(pkt->offset < blkSize);
+                            assert(pkt->size <= blkSize);
+                            assert(pkt->offset + pkt->size <=blkSize);
+                            memcpy(pkt->data, mshr->pkt->data + pkt->offset, pkt->size);
+                        }
+                        respondToSnoop(pkt);
+                    }
+
+                    if (pkt->cmd.isInvalidate()) {
+                        //This must be an upgrade or other cache will take ownership
+                        missQueue->markInService(mshr->pkt);
+                    }
+                    return;
+                }
+            }
+        }
+    }
+    CacheBlk::State new_state;
+    bool satisfy = coherence->handleBusRequest(pkt,blk,mshr, new_state);
+    if (satisfy) {
+        tags->handleSnoop(blk, new_state, pkt);
+        respondToSnoop(pkt);
+        return;
+    }
+    tags->handleSnoop(blk, new_state);
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::snoopResponse(Packet * &pkt)
+{
+    //Need to handle the response, if NACKED
+    if (pkt->isNacked()) {
+        //Need to mark it as not in service, and retry for bus
+        assert(0); //Yeah, we saw a NACK come through
+
+        //For now this should never get called; we return false when we see a NACK
+        //instead. By doing this we allow the bus_blocked mechanism to handle the retry.
+        //For now it retries in just 2 cycles; need to figure out how to change that.
+        //Eventually we will also want success to come in as a parameter.
+        //Need to make sure that we handle the functionality that happens on a
+        //successful return of the sendAddr function.
+    }
+}
+
+template<class TagStore, class Buffering, class Coherence>
+void
+Cache<TagStore,Buffering,Coherence>::invalidateBlk(Addr addr, int asid)
+{
+    tags->invalidateBlk(addr,asid);
+}
+
+
+/**
+ * @todo Fix to not assume write allocate
+ */
+template<class TagStore, class Buffering, class Coherence>
+Tick
+Cache<TagStore,Buffering,Coherence>::probe(Packet * &pkt, bool update)
+{
+    MemDebug::cacheProbe(pkt);
+
+    if (!pkt->isUncacheable()) {
+        if (pkt->cmd.isInvalidate() && !pkt->cmd.isRead()
+            && !pkt->cmd.isWrite()) {
+            //Upgrade or Invalidate, satisfy it, don't forward
+            DPRINTF(Cache, "%s %d %x ? blk_addr: %x\n", pkt->cmd.toString(),
+                    pkt->req->asid, pkt->paddr & (((ULL(1))<<48)-1),
+                    pkt->paddr & ~((Addr)blkSize - 1));
+            pkt->flags |= SATISFIED;
+            return 0;
+        }
+    }
+
+    if (!update && !doData()) {
+        // Nothing to do here
+        return mi->sendProbe(pkt,update);
+    }
+
+    PacketList writebacks;
+    int lat;
+    BlkType *blk = tags->handleAccess(pkt, lat, writebacks, update);
+
+    if (!blk) {
+        // Need to check for outstanding misses and writes
+        Addr blk_addr = pkt->paddr & ~(Addr(blkSize - 1));
+
+        // There can only be one matching outstanding miss.
+        MSHR* mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
+
+        // There can be many matching outstanding writes.
+        vector<MSHR*> writes;
+        missQueue->findWrites(blk_addr, pkt->req->asid, writes);
+
+        if (!update) {
+            mi->sendProbe(pkt, update);
+            // Check for data in MSHR and writebuffer.
+            if (mshr) {
+                warn("Found outstanding miss on an non-update probe");
+                MSHR::TargetList *targets = mshr->getTargetList();
+                MSHR::TargetList::iterator i = targets->begin();
+                MSHR::TargetList::iterator end = targets->end();
+                for (; i != end; ++i) {
+                    Packet * target = *i;
+                    // If the target contains data, and it overlaps the
+                    // probed request, need to update data
+                    if (target->cmd.isWrite() && target->overlaps(pkt)) {
+                        uint8_t* pkt_data;
+                        uint8_t* write_data;
+                        int data_size;
+                        if (target->paddr < pkt->paddr) {
+                            int offset = pkt->paddr - target->paddr;
+                            pkt_data = pkt->data;
+                            write_data = target->data + offset;
+                            data_size = target->size - offset;
+                            assert(data_size > 0);
+                            if (data_size > pkt->size)
+                                data_size = pkt->size;
+                        } else {
+                            int offset = target->paddr - pkt->paddr;
+                            pkt_data = pkt->data + offset;
+                            write_data = target->data;
+                            data_size = pkt->size - offset;
+                            assert(data_size > 0);
+                            if (data_size > target->size)
+                                data_size = target->size;
+                        }
+
+                        if (pkt->cmd.isWrite()) {
+                            memcpy(pkt_data, write_data, data_size);
+                        } else {
+                            memcpy(write_data, pkt_data, data_size);
+                        }
+                    }
+                }
+            }
+            for (int i = 0; i < writes.size(); ++i) {
+                Packet * write = writes[i]->pkt;
+                if (write->overlaps(pkt)) {
+                    warn("Found outstanding write on an non-update probe");
+                    uint8_t* pkt_data;
+                    uint8_t* write_data;
+                    int data_size;
+                    if (write->paddr < pkt->paddr) {
+                        int offset = pkt->paddr - write->paddr;
+                        pkt_data = pkt->data;
+                        write_data = write->data + offset;
+                        data_size = write->size - offset;
+                        assert(data_size > 0);
+                        if (data_size > pkt->size)
+                            data_size = pkt->size;
+                    } else {
+                        int offset = write->paddr - pkt->paddr;
+                        pkt_data = pkt->data + offset;
+                        write_data = write->data;
+                        data_size = pkt->size - offset;
+                        assert(data_size > 0);
+                        if (data_size > write->size)
+                            data_size = write->size;
+                    }
+
+                    if (pkt->cmd.isWrite()) {
+                        memcpy(pkt_data, write_data, data_size);
+                    } else {
+                        memcpy(write_data, pkt_data, data_size);
+                    }
+
+                }
+            }
+            return 0;
+        } else {
+            // update the cache state and statistics
+            if (mshr || !writes.empty()){
+                // Can't handle it, return the request unsatisfied.
+                return 0;
+            }
+            if (!pkt->isUncacheable()) {
+                // Fetch the cache block to fill
+                Packet * busPkt = new Packet();
+                busPkt->paddr = blk_addr;
+                busPkt->size = blkSize;
+                busPkt->data = new uint8_t[blkSize];
+
+                BlkType *blk = tags->findBlock(pkt);
+                busPkt->cmd = coherence->getBusCmd(pkt->cmd,
+                                                   (blk)? blk->status : 0);
+
+                busPkt->req->asid = pkt->req->asid;
+                busPkt->xc = pkt->xc;
+                busPkt->thread_num = pkt->thread_num;
+                busPkt->time = curTick;
+
+                lat = mi->sendProbe(busPkt, update);
+
+                if (!busPkt->isSatisfied()) {
+                    // blocked at a higher level, just return
+                    return 0;
+                }
+
+                misses[pkt->cmd.toIndex()][pkt->thread_num]++;
+
+                CacheBlk::State old_state = (blk) ? blk->status : 0;
+                tags->handleFill(blk, busPkt,
+                                 coherence->getNewState(busPkt, old_state),
+                                 writebacks, pkt);
+                // Handle writebacks if needed
+                while (!writebacks.empty()){
+                    mi->sendProbe(writebacks.front(), update);
+                    writebacks.pop_front();
+                }
+                return lat + hitLatency;
+            } else {
+                return mi->sendProbe(pkt,update);
+            }
+        }
+    } else {
+        // There was a cache hit.
+        // Handle writebacks if needed
+        while (!writebacks.empty()){
+            mi->sendProbe(writebacks.front(), update);
+            writebacks.pop_front();
+        }
+
+        if (update) {
+            hits[pkt->cmd.toIndex()][pkt->thread_num]++;
+        } else if (pkt->cmd.isWrite()) {
+            // Still need to change data in all locations.
+            return mi->sendProbe(pkt, update);
+        }
+        return curTick + lat;
+    }
+    fatal("Probe not handled.\n");
+    return 0;
+}
+
+template<class TagStore, class Buffering, class Coherence>
+Tick
+Cache<TagStore,Buffering,Coherence>::snoopProbe(Packet * &pkt, bool update)
+{
+    Addr blk_addr = pkt->paddr & ~(Addr(blkSize-1));
+    BlkType *blk = tags->findBlock(pkt);
+    MSHR *mshr = missQueue->findMSHR(blk_addr, pkt->req->asid);
+    CacheBlk::State new_state = 0;
+    bool satisfy = coherence->handleBusRequest(pkt, blk, mshr, new_state);
+    if (satisfy) {
+        tags->handleSnoop(blk, new_state, pkt);
+        return hitLatency;
+    }
+    tags->handleSnoop(blk, new_state);
+    return 0;
+}
+
diff --git a/src/mem/cache/coherence/coherence_protocol.cc b/src/mem/cache/coherence/coherence_protocol.cc
new file mode 100644 (file)
index 0000000..107fd25
--- /dev/null
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Steve Reinhardt
+ *          Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Definitions of CoherenceProtocol.
+ */
+
+#include <string>
+
+#include "base/misc.hh"
+#include "mem/cache/miss/mshr.hh"
+#include "mem/cache/cache.hh"
+#include "mem/cache/coherence/coherence_protocol.hh"
+#include "sim/builder.hh"
+
+using namespace std;
+
+
+CoherenceProtocol::StateTransition::StateTransition()
+    : busCmd(InvalidCmd), newState(-1), snoopFunc(invalidTransition)
+{
+}
+
+
+void
+CoherenceProtocol::regStats()
+{
+    // Even though we count all the possible transitions in the
+    // requestCount and snoopCount arrays, most of these are invalid,
+    // so we just select the interesting ones to print here.
+
+    requestCount[Invalid][Read]
+        .name(name() + ".read_invalid")
+        .desc("read misses to invalid blocks")
+        ;
+
+    requestCount[Invalid][Write]
+        .name(name() +".write_invalid")
+        .desc("write misses to invalid blocks")
+        ;
+
+    requestCount[Invalid][Soft_Prefetch]
+        .name(name() +".swpf_invalid")
+        .desc("soft prefetch misses to invalid blocks")
+        ;
+
+    requestCount[Invalid][Hard_Prefetch]
+        .name(name() +".hwpf_invalid")
+        .desc("hard prefetch misses to invalid blocks")
+        ;
+
+    requestCount[Shared][Write]
+        .name(name() + ".write_shared")
+        .desc("write misses to shared blocks")
+        ;
+
+    requestCount[Owned][Write]
+        .name(name() + ".write_owned")
+        .desc("write misses to owned blocks")
+        ;
+
+    snoopCount[Shared][Read]
+        .name(name() + ".snoop_read_shared")
+        .desc("read snoops on shared blocks")
+        ;
+
+    snoopCount[Shared][ReadEx]
+        .name(name() + ".snoop_readex_shared")
+        .desc("readEx snoops on shared blocks")
+        ;
+
+    snoopCount[Shared][Upgrade]
+        .name(name() + ".snoop_upgrade_shared")
+        .desc("upgradee snoops on shared blocks")
+        ;
+
+    snoopCount[Modified][Read]
+        .name(name() + ".snoop_read_modified")
+        .desc("read snoops on modified blocks")
+        ;
+
+    snoopCount[Modified][ReadEx]
+        .name(name() + ".snoop_readex_modified")
+        .desc("readEx snoops on modified blocks")
+        ;
+
+    snoopCount[Owned][Read]
+        .name(name() + ".snoop_read_owned")
+        .desc("read snoops on owned blocks")
+        ;
+
+    snoopCount[Owned][ReadEx]
+        .name(name() + ".snoop_readex_owned")
+        .desc("readEx snoops on owned blocks")
+        ;
+
+    snoopCount[Owned][Upgrade]
+        .name(name() + ".snoop_upgrade_owned")
+        .desc("upgrade snoops on owned blocks")
+        ;
+
+    snoopCount[Exclusive][Read]
+        .name(name() + ".snoop_read_exclusive")
+        .desc("read snoops on exclusive blocks")
+        ;
+
+    snoopCount[Exclusive][ReadEx]
+        .name(name() + ".snoop_readex_exclusive")
+        .desc("readEx snoops on exclusive blocks")
+        ;
+
+    snoopCount[Shared][Invalidate]
+        .name(name() + ".snoop_inv_shared")
+        .desc("Invalidate snoops on shared blocks")
+        ;
+
+    snoopCount[Owned][Invalidate]
+        .name(name() + ".snoop_inv_owned")
+        .desc("Invalidate snoops on owned blocks")
+        ;
+
+    snoopCount[Exclusive][Invalidate]
+        .name(name() + ".snoop_inv_exclusive")
+        .desc("Invalidate snoops on exclusive blocks")
+        ;
+
+    snoopCount[Modified][Invalidate]
+        .name(name() + ".snoop_inv_modified")
+        .desc("Invalidate snoops on modified blocks")
+        ;
+
+    snoopCount[Invalid][Invalidate]
+        .name(name() + ".snoop_inv_invalid")
+        .desc("Invalidate snoops on invalid blocks")
+        ;
+
+    snoopCount[Shared][WriteInvalidate]
+        .name(name() + ".snoop_writeinv_shared")
+        .desc("WriteInvalidate snoops on shared blocks")
+        ;
+
+    snoopCount[Owned][WriteInvalidate]
+        .name(name() + ".snoop_writeinv_owned")
+        .desc("WriteInvalidate snoops on owned blocks")
+        ;
+
+    snoopCount[Exclusive][WriteInvalidate]
+        .name(name() + ".snoop_writeinv_exclusive")
+        .desc("WriteInvalidate snoops on exclusive blocks")
+        ;
+
+    snoopCount[Modified][WriteInvalidate]
+        .name(name() + ".snoop_writeinv_modified")
+        .desc("WriteInvalidate snoops on modified blocks")
+        ;
+
+    snoopCount[Invalid][WriteInvalidate]
+        .name(name() + ".snoop_writeinv_invalid")
+        .desc("WriteInvalidate snoops on invalid blocks")
+        ;
+}
+
+
+bool
+CoherenceProtocol::invalidateTrans(BaseCache *cache, Packet * &pkt,
+                                   CacheBlk *blk, MSHR *mshr,
+                                   CacheBlk::State & new_state)
+{
+    // invalidate the block
+    new_state = (blk->status & ~stateMask) | Invalid;
+    return false;
+}
+
+
+bool
+CoherenceProtocol::supplyTrans(BaseCache *cache, Packet * &pkt,
+                               CacheBlk *blk,
+                               MSHR *mshr,
+                               CacheBlk::State & new_state
+                               )
+{
+    return true;
+}
+
+
+bool
+CoherenceProtocol::supplyAndGotoSharedTrans(BaseCache *cache, Packet * &pkt,
+                                            CacheBlk *blk,
+                                            MSHR *mshr,
+                                            CacheBlk::State & new_state)
+{
+    new_state = (blk->status & ~stateMask) | Shared;
+    pkt->flags |= SHARED_LINE;
+    return supplyTrans(cache, pkt, blk, mshr, new_state);
+}
+
+
+bool
+CoherenceProtocol::supplyAndGotoOwnedTrans(BaseCache *cache, Packet * &pkt,
+                                           CacheBlk *blk,
+                                           MSHR *mshr,
+                                           CacheBlk::State & new_state)
+{
+    new_state = (blk->status & ~stateMask) | Owned;
+    pkt->flags |= SHARED_LINE;
+    return supplyTrans(cache, pkt, blk, mshr, new_state);
+}
+
+
+bool
+CoherenceProtocol::supplyAndInvalidateTrans(BaseCache *cache, Packet * &pkt,
+                                            CacheBlk *blk,
+                                            MSHR *mshr,
+                                            CacheBlk::State & new_state)
+{
+    new_state = (blk->status & ~stateMask) | Invalid;
+    return supplyTrans(cache, pkt, blk, mshr, new_state);
+}
+
+bool
+CoherenceProtocol::assertShared(BaseCache *cache, Packet * &pkt,
+                                            CacheBlk *blk,
+                                            MSHR *mshr,
+                                            CacheBlk::State & new_state)
+{
+    new_state = (blk->status & ~stateMask) | Shared;
+    pkt->flags |= SHARED_LINE;
+    return false;
+}
+
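+// The constructor fills in the transition table for the chosen protocol:
+// onRequest() gives the bus command to issue for an incoming request in a
+// given state, onResponse() the state to enter when that command completes,
+// and onSnoop() the handler to run when the command is observed on the bus.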
+CoherenceProtocol::CoherenceProtocol(const string &name,
+                                     const string &protocol,
+                                     const bool doUpgrades)
+    : SimObject(name)
+{
+    if ((protocol == "mosi" || protocol == "moesi") && !doUpgrades) {
+        cerr << "CoherenceProtocol: ownership protocols require upgrade transactions"
+             << "(write miss on owned block generates ReadExcl, which will clobber dirty block)"
+             << endl;
+        fatal("");
+    }
+
+    Packet::CommandEnum writeToSharedCmd = doUpgrades ? Upgrade : ReadEx;
+
+//@todo add in hardware prefetch to this list
+    if (protocol == "msi") {
+        // incoming requests: specify outgoing bus request
+        transitionTable[Invalid][Read].onRequest(Read);
+        transitionTable[Invalid][Write].onRequest(ReadEx);
+        transitionTable[Shared][Write].onRequest(writeToSharedCmd);
+        //Prefetching causes a read
+        transitionTable[Invalid][Soft_Prefetch].onRequest(Read);
+        transitionTable[Invalid][Hard_Prefetch].onRequest(Read);
+
+        // on response to given request: specify new state
+        transitionTable[Invalid][Read].onResponse(Shared);
+        transitionTable[Invalid][ReadEx].onResponse(Modified);
+        transitionTable[Shared][writeToSharedCmd].onResponse(Modified);
+
+        // bus snoop transition functions
+        transitionTable[Invalid][Read].onSnoop(nullTransition);
+        transitionTable[Invalid][ReadEx].onSnoop(nullTransition);
+        transitionTable[Shared][Read].onSnoop(nullTransition);
+        transitionTable[Shared][ReadEx].onSnoop(invalidateTrans);
+        transitionTable[Modified][ReadEx].onSnoop(supplyAndInvalidateTrans);
+        transitionTable[Modified][Read].onSnoop(supplyAndGotoSharedTrans);
+        //Transitions on seeing a DMA (writeInv (same level) or DMAInv)
+        transitionTable[Invalid][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Shared][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Modified][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Invalid][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Shared][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Modified][WriteInvalidate].onSnoop(invalidateTrans);
+
+        if (doUpgrades) {
+            transitionTable[Invalid][Upgrade].onSnoop(nullTransition);
+            transitionTable[Shared][Upgrade].onSnoop(invalidateTrans);
+        }
+    }
+
+    else if(protocol == "mesi") {
+        // incoming requests: specify outgoing bus request
+        transitionTable[Invalid][Read].onRequest(Read);
+        transitionTable[Invalid][Write].onRequest(ReadEx);
+        transitionTable[Shared][Write].onRequest(writeToSharedCmd);
+        //Prefetching causes a read
+        transitionTable[Invalid][Soft_Prefetch].onRequest(Read);
+        transitionTable[Invalid][Hard_Prefetch].onRequest(Read);
+
+        // on response to given request: specify new state
+        transitionTable[Invalid][Read].onResponse(Exclusive);
+        //It will move into shared if the shared line is asserted in the
+        //getNewState function
+        transitionTable[Invalid][ReadEx].onResponse(Modified);
+        transitionTable[Shared][writeToSharedCmd].onResponse(Modified);
+
+        // bus snoop transition functions
+        transitionTable[Invalid][Read].onSnoop(nullTransition);
+        transitionTable[Invalid][ReadEx].onSnoop(nullTransition);
+        transitionTable[Shared][Read].onSnoop(assertShared);
+        transitionTable[Shared][ReadEx].onSnoop(invalidateTrans);
+        transitionTable[Exclusive][Read].onSnoop(assertShared);
+        transitionTable[Exclusive][ReadEx].onSnoop(invalidateTrans);
+        transitionTable[Modified][ReadEx].onSnoop(supplyAndInvalidateTrans);
+        transitionTable[Modified][Read].onSnoop(supplyAndGotoSharedTrans);
+        //Transitions on seeing a DMA (writeInv (same level) or DMAInv)
+        transitionTable[Invalid][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Shared][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Modified][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Exclusive][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Invalid][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Shared][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Modified][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Exclusive][WriteInvalidate].onSnoop(invalidateTrans);
+
+        if (doUpgrades) {
+            transitionTable[Invalid][Upgrade].onSnoop(nullTransition);
+            transitionTable[Shared][Upgrade].onSnoop(invalidateTrans);
+        }
+    }
+
+    else if(protocol == "mosi") {
+        // incoming requests: specify outgoing bus request
+        transitionTable[Invalid][Read].onRequest(Read);
+        transitionTable[Invalid][Write].onRequest(ReadEx);
+        transitionTable[Shared][Write].onRequest(writeToSharedCmd);
+        transitionTable[Owned][Write].onRequest(writeToSharedCmd);
+        //Prefetching causes a read
+        transitionTable[Invalid][Soft_Prefetch].onRequest(Read);
+        transitionTable[Invalid][Hard_Prefetch].onRequest(Read);
+
+        // on response to given request: specify new state
+        transitionTable[Invalid][Read].onResponse(Shared);
+        transitionTable[Invalid][ReadEx].onResponse(Modified);
+        transitionTable[Shared][writeToSharedCmd].onResponse(Modified);
+        transitionTable[Owned][writeToSharedCmd].onResponse(Modified);
+
+        // bus snoop transition functions
+        transitionTable[Invalid][Read].onSnoop(nullTransition);
+        transitionTable[Invalid][ReadEx].onSnoop(nullTransition);
+        transitionTable[Invalid][Upgrade].onSnoop(nullTransition);
+        transitionTable[Shared][Read].onSnoop(assertShared);
+        transitionTable[Shared][ReadEx].onSnoop(invalidateTrans);
+        transitionTable[Shared][Upgrade].onSnoop(invalidateTrans);
+        transitionTable[Modified][ReadEx].onSnoop(supplyAndInvalidateTrans);
+        transitionTable[Modified][Read].onSnoop(supplyAndGotoOwnedTrans);
+        transitionTable[Owned][Read].onSnoop(supplyAndGotoOwnedTrans);
+        transitionTable[Owned][ReadEx].onSnoop(supplyAndInvalidateTrans);
+        transitionTable[Owned][Upgrade].onSnoop(invalidateTrans);
+        //Transitions on seeing a DMA (writeInv (same level) or DMAInv)
+        transitionTable[Invalid][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Shared][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Modified][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Owned][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Invalid][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Shared][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Modified][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Owned][WriteInvalidate].onSnoop(invalidateTrans);
+    }
+
+    else if(protocol == "moesi") {
+        // incoming requests: specify outgoing bus request
+        transitionTable[Invalid][Read].onRequest(Read);
+        transitionTable[Invalid][Write].onRequest(ReadEx);
+        transitionTable[Shared][Write].onRequest(writeToSharedCmd);
+        transitionTable[Owned][Write].onRequest(writeToSharedCmd);
+        //Prefetching causes a read
+        transitionTable[Invalid][Soft_Prefetch].onRequest(Read);
+        transitionTable[Invalid][Hard_Prefetch].onRequest(Read);
+
+        // on response to given request: specify new state
+        transitionTable[Invalid][Read].onResponse(Exclusive);
+        //It will move into shared if the shared line is asserted in the
+        //getNewState function
+        transitionTable[Invalid][ReadEx].onResponse(Modified);
+        transitionTable[Shared][writeToSharedCmd].onResponse(Modified);
+        transitionTable[Owned][writeToSharedCmd].onResponse(Modified);
+
+        // bus snoop transition functions
+        transitionTable[Invalid][Read].onSnoop(nullTransition);
+        transitionTable[Invalid][ReadEx].onSnoop(nullTransition);
+        transitionTable[Invalid][Upgrade].onSnoop(nullTransition);
+        transitionTable[Shared][Read].onSnoop(assertShared);
+        transitionTable[Shared][ReadEx].onSnoop(invalidateTrans);
+        transitionTable[Shared][Upgrade].onSnoop(invalidateTrans);
+        transitionTable[Exclusive][Read].onSnoop(assertShared);
+        transitionTable[Exclusive][ReadEx].onSnoop(invalidateTrans);
+        transitionTable[Modified][Read].onSnoop(supplyAndGotoOwnedTrans);
+        transitionTable[Modified][ReadEx].onSnoop(supplyAndInvalidateTrans);
+        transitionTable[Owned][Read].onSnoop(supplyAndGotoOwnedTrans);
+        transitionTable[Owned][ReadEx].onSnoop(supplyAndInvalidateTrans);
+        transitionTable[Owned][Upgrade].onSnoop(invalidateTrans);
+        //Transitions on seeing a DMA (writeInv(samelevel) or DMAInv)
+        transitionTable[Invalid][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Shared][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Exclusive][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Modified][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Owned][Invalidate].onSnoop(invalidateTrans);
+        transitionTable[Invalid][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Shared][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Exclusive][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Modified][WriteInvalidate].onSnoop(invalidateTrans);
+        transitionTable[Owned][WriteInvalidate].onSnoop(invalidateTrans);
+    }
+
+    else {
+        cerr << "CoherenceProtocol: unrecognized protocol " << protocol
+             << endl;
+        fatal("");
+    }
+}
+
+
+Packet::Command
+CoherenceProtocol::getBusCmd(Packet::Command cmdIn, CacheBlk::State state,
+                             MSHR *mshr)
+{
+    state &= stateMask;
+    int cmd_idx = cmdIn.toIndex();
+
+    assert(0 <= state && state <= stateMax);
+    assert(0 <= cmd_idx && cmd_idx < NUM_MEM_CMDS);
+
+    Packet::Command cmdOut = transitionTable[state][cmd_idx].busCmd;
+
+    assert(cmdOut != InvalidCmd);
+
+    ++requestCount[state][cmd_idx];
+
+    return cmdOut;
+}
+
+
+CacheBlk::State
+CoherenceProtocol::getNewState(const Packet * &pkt, CacheBlk::State oldState)
+{
+    CacheBlk::State state = oldState & stateMask;
+    int cmd_idx = pkt->cmd.toIndex();
+
+    assert(0 <= state && state <= stateMax);
+    assert(0 <= cmd_idx && cmd_idx < NUM_MEM_CMDS);
+
+    CacheBlk::State newState = transitionTable[state][cmd_idx].newState;
+
+    //Check if it's exclusive and the shared line was asserted;
+    //if so, go to shared instead
+    if (newState == Exclusive && (pkt->flags & SHARED_LINE)) {
+        newState = Shared;
+    }
+
+    assert(newState != -1);
+
+    //Make sure not to lose any other state information
+    newState = (oldState & ~stateMask) | newState;
+    return newState;
+}
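+
+// A worked example of the masking above (illustrative only, assuming
+// BlkValid = 0x1, BlkWritable = 0x2 and BlkDirty = 0x4, so stateMask = 0x7):
+// if oldState = 0x19 (coherence bits 0x1 = Shared, plus unrelated status
+// bits 0x18) and the table yields Modified (0x7), then
+//     newState = (0x19 & ~0x7) | 0x7 = 0x18 | 0x7 = 0x1f,
+// i.e. the block becomes Modified while the non-coherence bits survive.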
+
+
+bool
+CoherenceProtocol::handleBusRequest(BaseCache *cache, Packet * &pkt,
+                                    CacheBlk *blk,
+                                    MSHR *mshr,
+                                    CacheBlk::State & new_state)
+{
+    if (blk == NULL) {
+        // nothing to do if we don't have a block
+        return false;
+    }
+
+    CacheBlk::State state = blk->status & stateMask;
+    int cmd_idx = pkt->cmd.toIndex();
+
+    assert(0 <= state && state <= stateMax);
+    assert(0 <= cmd_idx && cmd_idx < NUM_MEM_CMDS);
+
+//    assert(mshr == NULL); // can't currently handle outstanding requests
+    //Check first if there is an MSHR and, if so, ensure that it is not
+    //in service
+    assert(!mshr || mshr->inService == 0);
+    ++snoopCount[state][cmd_idx];
+
+    bool ret = transitionTable[state][cmd_idx].snoopFunc(cache, pkt, blk,
+                                                          mshr, new_state);
+
+    return ret;
+}
+
+bool
+CoherenceProtocol::nullTransition(BaseCache *cache, Packet * &pkt,
+                                  CacheBlk *blk, MSHR *mshr,
+                                  CacheBlk::State & new_state)
+{
+    // do nothing
+    if (blk)
+        new_state = blk->status;
+    return false;
+}
+
+
+bool
+CoherenceProtocol::invalidTransition(BaseCache *cache, Packet * &pkt,
+                                     CacheBlk *blk, MSHR *mshr,
+                                     CacheBlk::State & new_state)
+{
+    panic("Invalid transition");
+    return false;
+}
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+BEGIN_DECLARE_SIM_OBJECT_PARAMS(CoherenceProtocol)
+
+    Param<string> protocol;
+    Param<bool> do_upgrades;
+
+END_DECLARE_SIM_OBJECT_PARAMS(CoherenceProtocol)
+
+
+BEGIN_INIT_SIM_OBJECT_PARAMS(CoherenceProtocol)
+
+    INIT_PARAM(protocol, "name of coherence protocol"),
+    INIT_PARAM_DFLT(do_upgrades, "use upgrade transactions?", true)
+
+END_INIT_SIM_OBJECT_PARAMS(CoherenceProtocol)
+
+
+CREATE_SIM_OBJECT(CoherenceProtocol)
+{
+    return new CoherenceProtocol(getInstanceName(), protocol,
+                                 do_upgrades);
+}
+
+REGISTER_SIM_OBJECT("CoherenceProtocol", CoherenceProtocol)
+
+#endif // DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/coherence/coherence_protocol.hh b/src/mem/cache/coherence/coherence_protocol.hh
new file mode 100644 (file)
index 0000000..4f65205
--- /dev/null
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Steve Reinhardt
+ */
+
+/**
+ * @file
+ * Declaration of CoherenceProtocol, a basic coherence policy.
+ */
+#ifndef __COHERENCE_PROTOCOL_HH__
+#define __COHERENCE_PROTOCOL_HH__
+
+#include <string>
+
+#include "sim/sim_object.hh"
+#include "mem/packet.hh"
+#include "mem/mem_cmd.hh"
+#include "mem/cache/cache_blk.hh"
+#include "base/statistics.hh"
+
+class BaseCache;
+class MSHR;
+
+/**
+ * A simple coherence policy for the memory hierarchy. Currently implements
+ * MSI, MESI, MOSI, and MOESI protocols.
+ */
+class CoherenceProtocol : public SimObject
+{
+  public:
+    /**
+     * Construct and initialize this policy.
+     * @param name The name of this policy.
+     * @param protocol The string representation of the protocol to use.
+     * @param doUpgrades True if bus upgrades should be used.
+     */
+    CoherenceProtocol(const std::string &name, const std::string &protocol,
+                      const bool doUpgrades);
+
+    /**
+     * Destructor.
+     */
+    virtual ~CoherenceProtocol() {}
+
+    /**
+     * Register statistics
+     */
+    virtual void regStats();
+
+    /**
+     * Get the proper bus command for the given command and status.
+     * @param cmd The request's command.
+     * @param status The current state of the cache block.
+     * @param mshr The MSHR matching the request.
+     * @return The proper bus command, as determined by the protocol.
+     */
+    Packet::Command getBusCmd(Packet::Command cmd, CacheBlk::State status,
+                              MSHR *mshr = NULL);
+
+    /**
+     * Return the proper state given the current state and the bus response.
+     * @param pkt The bus response.
+     * @param oldState The current block state.
+     * @return The new state.
+     */
+    CacheBlk::State getNewState(const Packet * &pkt,
+                                CacheBlk::State oldState);
+
+    /**
+     * Handle snooped bus requests.
+     * @param cache The cache that snooped the request.
+     * @param pkt The snooped bus request.
+     * @param blk The cache block corresponding to the request, if any.
+     * @param mshr The MSHR corresponding to the request, if any.
+     * @param new_state The new coherence state of the block.
+     * @return True if the request should be satisfied locally.
+     */
+    bool handleBusRequest(BaseCache *cache, Packet * &pkt, CacheBlk *blk,
+                          MSHR *mshr, CacheBlk::State &new_state);
+
+  protected:
+    /** Snoop function type. */
+    typedef bool (*SnoopFuncType)(BaseCache *, Packet *&, CacheBlk *,
+                                  MSHR *, CacheBlk::State&);
+
+    //
+    // Standard snoop transition functions
+    //
+
+    /**
+     * Do nothing transition.
+     */
+    static bool nullTransition(BaseCache *, Packet *&, CacheBlk *,
+                               MSHR *, CacheBlk::State&);
+
+    /**
+     * Invalid transition, basically panic.
+     */
+    static bool invalidTransition(BaseCache *, Packet *&, CacheBlk *,
+                                  MSHR *, CacheBlk::State&);
+
+    /**
+     * Invalidate block, move to Invalid state.
+     */
+    static bool invalidateTrans(BaseCache *, Packet *&, CacheBlk *,
+                                MSHR *, CacheBlk::State&);
+
+    /**
+     * Supply data, no state transition.
+     */
+    static bool supplyTrans(BaseCache *, Packet *&, CacheBlk *,
+                            MSHR *, CacheBlk::State&);
+
+    /**
+     * Supply data and go to Shared state.
+     */
+    static bool supplyAndGotoSharedTrans(BaseCache *, Packet *&, CacheBlk *,
+                                         MSHR *, CacheBlk::State&);
+
+    /**
+     * Supply data and go to Owned state.
+     */
+    static bool supplyAndGotoOwnedTrans(BaseCache *, Packet *&, CacheBlk *,
+                                        MSHR *, CacheBlk::State&);
+
+    /**
+     * Invalidate block, supply data, and go to Invalid state.
+     */
+    static bool supplyAndInvalidateTrans(BaseCache *, Packet *&, CacheBlk *,
+                                         MSHR *, CacheBlk::State&);
+
+    /**
+     * Assert the shared line for a block that is shared/exclusive.
+     */
+    static bool assertShared(BaseCache *, Packet *&, CacheBlk *,
+                             MSHR *, CacheBlk::State&);
+
+    /**
+     * Definition of protocol state transitions.
+     */
+    class StateTransition
+    {
+        friend class CoherenceProtocol;
+
+        /** The bus command of this transition. */
+        Packet::Command busCmd;
+        /** The state to transition to. */
+        int newState;
+        /** The snoop function for this transition. */
+        SnoopFuncType snoopFunc;
+
+        /**
+         * Constructor, defaults to invalid transition.
+         */
+        StateTransition();
+
+        /**
+         * Initialize bus command.
+         * @param cmd The bus command to use.
+         */
+        void onRequest(Packet::Command cmd)
+        {
+            busCmd = cmd;
+        }
+
+        /**
+         * Set the transition state.
+         * @param s The new state.
+         */
+        void onResponse(CacheBlk::State s)
+        {
+            newState = s;
+        }
+
+        /**
+         * Initialize the snoop function.
+         * @param f The new snoop function.
+         */
+        void onSnoop(SnoopFuncType f)
+        {
+            snoopFunc = f;
+        }
+    };
+
+    friend class CoherenceProtocol::StateTransition;
+
+    /** Mask to select status bits relevant to coherence protocol. */
+    const static CacheBlk::State
+        stateMask = BlkValid | BlkWritable | BlkDirty;
+
+    /** The Modified (M) state. */
+    const static CacheBlk::State
+        Modified = BlkValid | BlkWritable | BlkDirty;
+    /** The Owned (O) state. */
+    const static CacheBlk::State
+        Owned = BlkValid | BlkDirty;
+    /** The Exclusive (E) state. */
+    const static CacheBlk::State
+        Exclusive = BlkValid | BlkWritable;
+    /** The Shared (S) state. */
+    const static CacheBlk::State
+        Shared = BlkValid;
+    /** The Invalid (I) state. */
+    const static CacheBlk::State
+        Invalid = 0;
+
+    /**
+     * Maximum state encoding value (used to size transition lookup
+     * table).  Could be more than number of states, depends on
+     * encoding of status bits.
+     */
+    const static int stateMax = stateMask;
+
+    /**
+     * The table of all possible transitions, organized by starting state and
+     * request command.
+     */
+    StateTransition transitionTable[stateMax+1][NUM_MEM_CMDS];
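+
+    // Sizing sketch (illustrative, not normative): if BlkValid, BlkWritable
+    // and BlkDirty are assumed to be 0x1, 0x2 and 0x4, then stateMask ==
+    // stateMax == 0x7 and the table has 8 rows, one per encoding of the
+    // three coherence bits.  A snoop lookup then reduces to:
+    //     transitionTable[blk->status & stateMask][pkt->cmd.toIndex()]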
+
+    /**
+     * @addtogroup CoherenceStatistics
+     * @{
+     */
+    /**
+     * State accesses from parent cache.
+     */
+    Stats::Scalar<> requestCount[stateMax+1][NUM_MEM_CMDS];
+    /**
+     * State accesses from snooped requests.
+     */
+    Stats::Scalar<> snoopCount[stateMax+1][NUM_MEM_CMDS];
+    /**
+     * @}
+     */
+};
+
+#endif // __COHERENCE_PROTOCOL_HH__
diff --git a/src/mem/cache/coherence/simple_coherence.hh b/src/mem/cache/coherence/simple_coherence.hh
new file mode 100644 (file)
index 0000000..1956745
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declaration of a simple coherence policy.
+ */
+
+#ifndef __SIMPLE_COHERENCE_HH__
+#define __SIMPLE_COHERENCE_HH__
+
+#include <string>
+
+#include "mem/packet.hh"
+#include "mem/mem_cmd.hh"
+#include "mem/cache/cache_blk.hh"
+#include "mem/cache/miss/mshr_queue.hh"
+#include "mem/cache/coherence/coherence_protocol.hh"
+
+class BaseCache;
+
+/**
+ * A simple MP coherence policy. This policy assumes an atomic bus and only one
+ * level of cache.
+ */
+class SimpleCoherence
+{
+  protected:
+    /** Pointer to the parent cache. */
+    BaseCache *cache;
+    /** Pointer to the coherence protocol. */
+    CoherenceProtocol *protocol;
+
+  public:
+    /**
+     * Construct and initialize this coherence policy.
+     * @param _protocol The coherence protocol to use.
+     */
+    SimpleCoherence(CoherenceProtocol *_protocol)
+        : protocol(_protocol)
+    {
+    }
+
+    /**
+     * Set the pointer to the parent cache.
+     * @param _cache The parent cache.
+     */
+    void setCache(BaseCache *_cache)
+    {
+        cache = _cache;
+    }
+
+    /**
+     * Register statistics.
+     * @param name The name to prepend to stat descriptions.
+     */
+    void regStats(const std::string &name)
+    {
+    }
+
+    /**
+     * This policy does not forward invalidates, so this returns NULL.
+     * @return NULL.
+     */
+    Packet * getPacket()
+    {
+        return NULL;
+    }
+
+    /**
+     * Return the proper state given the current state and the bus response.
+     * @param pkt The bus response.
+     * @param current The current block state.
+     * @return The new state.
+     */
+    CacheBlk::State getNewState(Packet * &pkt, CacheBlk::State current)
+    {
+        return protocol->getNewState(pkt, current);
+    }
+
+    /**
+     * Handle snooped bus requests.
+     * @param pkt The snooped bus request.
+     * @param blk The cache block corresponding to the request, if any.
+     * @param mshr The MSHR corresponding to the request, if any.
+     * @param new_state Return the new state for the block.
+     */
+    bool handleBusRequest(Packet * &pkt, CacheBlk *blk, MSHR *mshr,
+                          CacheBlk::State &new_state)
+    {
+//      assert(mshr == NULL);
+        //The assert above was removed: there may be an MSHR, but it cannot
+        //be in service
+        if (blk != NULL)
+        {
+            if (pkt->cmd != Writeback) {
+                return protocol->handleBusRequest(cache, pkt, blk, mshr,
+                                              new_state);
+            }
+            else { //It is a writeback; must be an ownership protocol, so keep the state
+                new_state = blk->status;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Get the proper bus command for the given command and status.
+     * @param cmd The request's command.
+     * @param state The current state of the cache block.
+     * @return The proper bus command, as determined by the protocol.
+     */
+    Packet::Command getBusCmd(Packet::Command &cmd, CacheBlk::State state)
+    {
+        if (cmd == Writeback) return Writeback;
+        return protocol->getBusCmd(cmd, state);
+    }
+
+    /**
+     * Return true if this coherence policy can handle fast cache writes.
+     */
+    bool allowFastWrites() { return false; }
+
+    bool hasProtocol() { return true; }
+};
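+
+// Rough usage sketch (the enclosing cache's names below are assumed, not
+// taken from this file): the parent cache owns the policy and wires the
+// back pointer before use, e.g.
+//     SimpleCoherence coh(protocolPtr); // protocolPtr: a CoherenceProtocol*
+//     coh.setCache(parentCache);
+//     Packet::Command busCmd = coh.getBusCmd(pkt->cmd, blk->status);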
+
+#endif //__SIMPLE_COHERENCE_HH__
+
diff --git a/src/mem/cache/coherence/uni_coherence.cc b/src/mem/cache/coherence/uni_coherence.cc
new file mode 100644 (file)
index 0000000..68a78e3
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+#include "mem/cache/coherence/uni_coherence.hh"
+#include "mem/cache/base_cache.hh"
+
+#include "base/trace.hh"
+
+using namespace std;
+
+UniCoherence::UniCoherence()
+    : cshrs(50)
+{
+}
+
+Packet *
+UniCoherence::getPacket()
+{
+    bool unblock = cshrs.isFull();
+    Packet * pkt = cshrs.getPkt();
+    cshrs.markInService(pkt->senderState);
+    if (!cshrs.havePending()) {
+        cache->clearSlaveRequest(Request_Coherence);
+    }
+    if (unblock) {
+        //since CSHRs are always used as buffers, should always get rid of one
+        assert(!cshrs.isFull());
+        cache->clearBlocked(Blocked_Coherence);
+    }
+    return pkt;
+}
+
+/**
+ * @todo add support for returning slave requests, not doing them here.
+ */
+bool
+UniCoherence::handleBusRequest(Packet * &pkt, CacheBlk *blk, MSHR *mshr,
+                               CacheBlk::State &new_state)
+{
+    new_state = 0;
+    if (pkt->cmd.isInvalidate()) {
+        DPRINTF(Cache, "snoop inval on blk %x (blk ptr %x)\n",
+                pkt->paddr, blk);
+        if (!cache->isTopLevel()) {
+            // Forward to other caches
+            Packet * tmp = new Packet();
+            tmp->cmd = Invalidate;
+            tmp->paddr = pkt->paddr;
+            tmp->size = pkt->size;
+            cshrs.allocate(tmp);
+            cache->setSlaveRequest(Request_Coherence, curTick);
+            if (cshrs.isFull()) {
+                cache->setBlockedForSnoop(Blocked_Coherence);
+            }
+        }
+    } else {
+        if (blk) {
+            new_state = blk->status;
+        }
+    }
+    return false;
+}
diff --git a/src/mem/cache/coherence/uni_coherence.hh b/src/mem/cache/coherence/uni_coherence.hh
new file mode 100644 (file)
index 0000000..b64f6c9
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+#ifndef __UNI_COHERENCE_HH__
+#define __UNI_COHERENCE_HH__
+
+#include "base/trace.hh"
+#include "mem/cache/cache_blk.hh"
+#include "mem/cache/miss/mshr_queue.hh"
+#include "mem/mem_cmd.hh"
+#include "mem/packet.hh"
+
+class BaseCache;
+
+class UniCoherence
+{
+  protected:
+    /** Buffers to hold forwarded invalidates. */
+    MSHRQueue cshrs;
+    /** Pointer to the parent cache. */
+    BaseCache *cache;
+
+  public:
+    /**
+     * Construct and initialize this coherence policy.
+     */
+    UniCoherence();
+
+    /**
+     * Set the pointer to the parent cache.
+     * @param _cache The parent cache.
+     */
+    void setCache(BaseCache *_cache)
+    {
+        cache = _cache;
+    }
+
+    /**
+     * Register statistics.
+     * @param name The name to prepend to stat descriptions.
+     */
+    void regStats(const std::string &name)
+    {
+    }
+
+    /**
+     * Return Read.
+     * @param cmd The request's command.
+     * @param state The current state of the cache block.
+     * @return The proper bus command, as determined by the protocol.
+     * @todo Make changes so writebacks don't get here.
+     */
+    Packet::Command getBusCmd(Packet::Command &cmd, CacheBlk::State state)
+    {
+        if (cmd == Hard_Prefetch && state)
+            warn("Trying to issue a prefetch to a block we already have\n");
+        if (cmd == Writeback)
+            return Writeback;
+        return Read;
+    }
+
+    /**
+     * Just return readable and writable.
+     * @param pkt The bus response.
+     * @param current The current block state.
+     * @return The new state.
+     */
+    CacheBlk::State getNewState(Packet * &pkt, CacheBlk::State current)
+    {
+        if (pkt->senderState) //Blocking Buffers don't get mshrs
+        {
+            if (pkt->senderState->originalCmd == Hard_Prefetch) {
+                DPRINTF(HWPrefetch, "Marking a hardware prefetch as such in the state\n");
+                return BlkHWPrefetched | BlkValid | BlkWritable;
+            }
+            else {
+                return BlkValid | BlkWritable;
+            }
+        }
+        //@todo What about prefetching with blocking buffers
+        else
+            return BlkValid | BlkWritable;
+    }
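+
+    // Note (informal): with no other cache in the system there can be no
+    // other sharer, so a fill can always be granted write permission; a
+    // fill whose MSHR records Hard_Prefetch is additionally tagged
+    // BlkHWPrefetched so later accesses can recognize prefetched blocks.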
+    /**
+     * Return outstanding invalidate to forward.
+     * @return The next invalidate to forward to lower levels of cache.
+     */
+    Packet * getPacket();
+
+    /**
+     * Handle snooped bus requests.
+     * @param pkt The snooped bus request.
+     * @param blk The cache block corresponding to the request, if any.
+     * @param mshr The MSHR corresponding to the request, if any.
+     * @param new_state The new coherence state of the block.
+     * @return True if the request should be satisfied locally.
+     */
+    bool handleBusRequest(Packet * &pkt, CacheBlk *blk, MSHR *mshr,
+                          CacheBlk::State &new_state);
+
+    /**
+     * Return true if this coherence policy can handle fast cache writes.
+     */
+    bool allowFastWrites() { return true; }
+
+    bool hasProtocol() { return false; }
+};
+
+#endif //__UNI_COHERENCE_HH__
diff --git a/src/mem/cache/miss/blocking_buffer.cc b/src/mem/cache/miss/blocking_buffer.cc
new file mode 100644 (file)
index 0000000..621855c
--- /dev/null
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Definitions of a simple buffer for a blocking cache.
+ */
+
+#include "cpu/exec_context.hh"
+#include "cpu/smt.hh" //for maxThreadsPerCPU
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/miss/blocking_buffer.hh"
+#include "mem/cache/prefetch/base_prefetcher.hh"
+#include "sim/eventq.hh" // for Event declaration.
+
+using namespace TheISA;
+
+/**
+ * @todo Move writebacks into shared BaseBuffer class.
+ */
+void
+BlockingBuffer::regStats(const std::string &name)
+{
+    using namespace Stats;
+    writebacks
+        .init(maxThreadsPerCPU)
+        .name(name + ".writebacks")
+        .desc("number of writebacks")
+        .flags(total)
+        ;
+}
+
+void
+BlockingBuffer::setCache(BaseCache *_cache)
+{
+    cache = _cache;
+    blkSize = cache->getBlockSize();
+}
+
+void
+BlockingBuffer::setPrefetcher(BasePrefetcher *_prefetcher)
+{
+    prefetcher = _prefetcher;
+}
+
+void
+BlockingBuffer::handleMiss(Packet * &pkt, int blk_size, Tick time)
+{
+    Addr blk_addr = pkt->paddr & ~(Addr)(blk_size - 1);
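+    // e.g. with blk_size = 64 the mask clears the low six bits:
+    // paddr 0x12345 -> blk_addr 0x12340 (illustrative values only)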
+    if (pkt->cmd.isWrite() && (pkt->isUncacheable() || !writeAllocate ||
+                               pkt->cmd.isNoResponse())) {
+        if (pkt->cmd.isNoResponse()) {
+            wb.allocateAsBuffer(pkt);
+        } else {
+            wb.allocate(pkt->cmd, blk_addr, pkt->req->asid, blk_size, pkt);
+        }
+        if (cache->doData()) {
+            memcpy(wb.pkt->data, pkt->data, blk_size);
+        }
+        cache->setBlocked(Blocked_NoWBBuffers);
+        cache->setMasterRequest(Request_WB, time);
+        return;
+    }
+
+    if (pkt->cmd.isNoResponse()) {
+        miss.allocateAsBuffer(pkt);
+    } else {
+        miss.allocate(pkt->cmd, blk_addr, pkt->req->asid, blk_size, pkt);
+    }
+    if (!pkt->isUncacheable()) {
+        miss.pkt->flags |= CACHE_LINE_FILL;
+    }
+    cache->setBlocked(Blocked_NoMSHRs);
+    cache->setMasterRequest(Request_MSHR, time);
+}
+
+Packet *
+BlockingBuffer::getPacket()
+{
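+    // Service the outstanding miss first, if any; otherwise fall back to
+    // the pending writeback.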
+    if (miss.pkt && !miss.inService) {
+        return miss.pkt;
+    }
+    return wb.pkt;
+}
+
+void
+BlockingBuffer::setBusCmd(Packet * &pkt, Packet::Command cmd)
+{
+    MSHR *mshr = pkt->senderState;
+    mshr->originalCmd = pkt->cmd;
+    if (pkt->isCacheFill())
+        pkt->cmd = cmd;
+}
+
+void
+BlockingBuffer::restoreOrigCmd(Packet * &pkt)
+{
+    pkt->cmd = pkt->senderState->originalCmd;
+}
+
+void
+BlockingBuffer::markInService(Packet * &pkt)
+{
+    if (!pkt->isCacheFill() && pkt->cmd.isWrite()) {
+        // Forwarding a write/writeback; no need to change
+        // the command
+        assert(pkt->senderState == &wb);
+        cache->clearMasterRequest(Request_WB);
+        if (pkt->cmd.isNoResponse()) {
+            assert(wb.getNumTargets() == 0);
+            wb.deallocate();
+            cache->clearBlocked(Blocked_NoWBBuffers);
+        } else {
+            wb.inService = true;
+        }
+    } else {
+        assert(pkt->senderState == &miss);
+        cache->clearMasterRequest(Request_MSHR);
+        if (pkt->cmd.isNoResponse()) {
+            assert(miss.getNumTargets() == 0);
+            miss.deallocate();
+            cache->clearBlocked(Blocked_NoMSHRs);
+        } else {
+            //mark in service
+            miss.inService = true;
+        }
+    }
+}
+
+void
+BlockingBuffer::handleResponse(Packet * &pkt, Tick time)
+{
+    if (pkt->isCacheFill()) {
+        // targets were handled in the cache tags
+        assert(pkt->senderState == &miss);
+        miss.deallocate();
+        cache->clearBlocked(Blocked_NoMSHRs);
+    } else {
+        if (pkt->senderState->hasTargets()) {
+            // Should only have 1 target if we had any
+            assert(pkt->senderState->getNumTargets() == 1);
+            Packet * target = pkt->senderState->getTarget();
+            pkt->senderState->popTarget();
+            if (cache->doData() && pkt->cmd.isRead()) {
+                memcpy(target->data, pkt->data, target->size);
+            }
+            cache->respond(target, time);
+            assert(!pkt->senderState->hasTargets());
+        }
+
+        if (pkt->cmd.isWrite()) {
+            assert(pkt->senderState == &wb);
+            wb.deallocate();
+            cache->clearBlocked(Blocked_NoWBBuffers);
+        } else {
+            miss.deallocate();
+            cache->clearBlocked(Blocked_NoMSHRs);
+        }
+    }
+}
+
+void
+BlockingBuffer::squash(int thread_number)
+{
+    if (miss.threadNum == thread_number) {
+        Packet * target = miss.getTarget();
+        miss.popTarget();
+        assert(target->thread_num == thread_number);
+        if (target->completionEvent != NULL) {
+            delete target->completionEvent;
+        }
+        target = NULL;
+        assert(!miss.hasTargets());
+        miss.ntargets = 0;
+        if (!miss.inService) {
+            miss.deallocate();
+            cache->clearBlocked(Blocked_NoMSHRs);
+            cache->clearMasterRequest(Request_MSHR);
+        }
+    }
+}
+
+void
+BlockingBuffer::doWriteback(Addr addr, int asid, ExecContext *xc,
+                            int size, uint8_t *data, bool compressed)
+{
+    // Generate request
+    Packet * pkt = new Packet();
+    pkt->paddr = addr;
+    pkt->req->asid = asid;
+    pkt->size = size;
+    pkt->data = new uint8_t[size];
+    if (data) {
+        memcpy(pkt->data, data, size);
+    }
+    /**
+     * @todo Need to find a way to charge the writeback to the "correct"
+     * thread.
+     */
+    pkt->xc = xc;
+    if (xc)
+        pkt->thread_num = xc->getThreadNum();
+    else
+        pkt->thread_num = 0;
+
+    pkt->cmd = Writeback;
+    if (compressed) {
+        pkt->flags |= COMPRESSED;
+    }
+
+    writebacks[pkt->thread_num]++;
+
+    wb.allocateAsBuffer(pkt);
+    cache->setMasterRequest(Request_WB, curTick);
+    cache->setBlocked(Blocked_NoWBBuffers);
+}
+
+void
+BlockingBuffer::doWriteback(Packet * &pkt)
+{
+    writebacks[pkt->thread_num]++;
+
+    wb.allocateAsBuffer(pkt);
+
+    // Since allocateAsBuffer copies the request,
+    // the data needs to be copied here.
+    if (cache->doData()) {
+        memcpy(wb.pkt->data, pkt->data, pkt->size);
+    }
+    cache->setBlocked(Blocked_NoWBBuffers);
+    cache->setMasterRequest(Request_WB, curTick);
+}
diff --git a/src/mem/cache/miss/blocking_buffer.hh b/src/mem/cache/miss/blocking_buffer.hh
new file mode 100644 (file)
index 0000000..52256be
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declaration of a simple buffer for a blocking cache.
+ */
+
+#ifndef __BLOCKING_BUFFER_HH__
+#define __BLOCKING_BUFFER_HH__
+
+#include <vector>
+
+#include "mem/cache/miss/mshr.hh"
+#include "base/statistics.hh"
+
+class BaseCache;
+class BasePrefetcher;
+
+/**
+ * Miss and writeback storage for a blocking cache.
+ */
+class BlockingBuffer
+{
+protected:
+    /** Miss storage. */
+    MSHR miss;
+    /** WB storage. */
+    MSHR wb;
+
+    //Params
+
+    /** Allocate on write misses. */
+    const bool writeAllocate;
+
+    /** Pointer to the parent cache. */
+    BaseCache* cache;
+
+    BasePrefetcher* prefetcher;
+
+    /** Block size of the parent cache. */
+    int blkSize;
+
+    // Statistics
+    /**
+     * @addtogroup CacheStatistics
+     * @{
+     */
+    /** Number of blocks written back per thread. */
+    Stats::Vector<> writebacks;
+
+    /**
+     * @}
+     */
+
+public:
+    /**
+     * Builds and initializes this buffer.
+     * @param write_allocate If true, treat write misses the same as reads.
+     */
+    BlockingBuffer(bool write_allocate)
+        : writeAllocate(write_allocate)
+    {
+    }
+
+    /**
+     * Register statistics for this object.
+     * @param name The name of the parent cache.
+     */
+    void regStats(const std::string &name);
+
+    /**
+     * Called by the parent cache to set the back pointer.
+     * @param _cache A pointer to the parent cache.
+     */
+    void setCache(BaseCache *_cache);
+
+    void setPrefetcher(BasePrefetcher *_prefetcher);
+
+    /**
+     * Handle a cache miss properly. Requests the bus and marks the cache as
+     * blocked.
+     * @param pkt The request that missed in the cache.
+     * @param blk_size The block size of the cache.
+     * @param time The time the miss is detected.
+     */
+    void handleMiss(Packet * &pkt, int blk_size, Tick time);
+
+    /**
+     * Fetch the block for the given address and buffer the given target.
+     * @param addr The address to fetch.
+     * @param asid The address space of the address.
+     * @param blk_size The block size of the cache.
+     * @param time The time the miss is detected.
+     * @param target The target for the fetch.
+     */
+    MSHR* fetchBlock(Addr addr, int asid, int blk_size, Tick time,
+                     Packet * &target)
+    {
+        fatal("Unimplemented");
+    }
+
+    /**
+     * Selects an outstanding request to service.
+     * @return The request to service, NULL if none found.
+     */
+    Packet * getPacket();
+
+    /**
+     * Set the command to the given bus command.
+     * @param pkt The request to update.
+     * @param cmd The bus command to use.
+     */
+    void setBusCmd(Packet * &pkt, Packet::Command cmd);
+
+    /**
+     * Restore the original command in case of a bus transmission error.
+     * @param pkt The request to reset.
+     */
+    void restoreOrigCmd(Packet * &pkt);
+
+    /**
+     * Marks a request as in service (sent on the bus). This can have side
+     * effects, since storage for no-response commands is deallocated once
+     * they are successfully sent.
+     * @param pkt The request that was sent on the bus.
+     */
+    void markInService(Packet * &pkt);
+
+    /**
+     * Frees the resources of the request and unblocks the cache.
+     * @param pkt The request that has been satisfied.
+     * @param time The time when the request is satisfied.
+     */
+    void handleResponse(Packet * &pkt, Tick time);
+
+    /**
+     * Removes all outstanding requests for a given thread number. If a request
+     * has been sent to the bus, this function removes all of its targets.
+     * @param thread_number The thread number of the requests to squash.
+     */
+    void squash(int thread_number);
+
+    /**
+     * Return the current number of outstanding misses.
+     * @return the number of outstanding misses.
+     */
+    int getMisses()
+    {
+        return miss.getNumTargets();
+    }
+
+    /**
+     * Searches for the supplied address in the miss "queue".
+     * @param addr The address to look for.
+     * @param asid The address space id.
+     * @return A pointer to miss if it matches.
+     */
+    MSHR* findMSHR(Addr addr, int asid)
+    {
+        if (miss.addr == addr && miss.pkt)
+            return &miss;
+        return NULL;
+    }
+
+    /**
+     * Searches for the supplied address in the write buffer.
+     * @param addr The address to look for.
+     * @param asid The address space id.
+     * @param writes List of pointers to the matching writes.
+     * @return True if there is a matching write.
+     */
+    bool findWrites(Addr addr, int asid, std::vector<MSHR*>& writes)
+    {
+        if (wb.addr == addr && wb.pkt) {
+            writes.push_back(&wb);
+            return true;
+        }
+        return false;
+    }
+
+
+
+    /**
+     * Perform a writeback of dirty data to the given address.
+     * @param addr The address to write to.
+     * @param asid The address space id.
+     * @param xc The execution context of the address space.
+     * @param size The number of bytes to write.
+     * @param data The data to write, can be NULL.
+     * @param compressed True if the data is compressed.
+     */
+    void doWriteback(Addr addr, int asid, ExecContext *xc,
+                     int size, uint8_t *data, bool compressed);
+
+    /**
+     * Perform a writeback request.
+     * @param pkt The writeback request.
+     */
+    void doWriteback(Packet * &pkt);
+
+    /**
+     * Returns true if there are outstanding requests.
+     * @return True if there are outstanding requests.
+     */
+    bool havePending()
+    {
+        return !miss.inService || !wb.inService;
+    }
+
+    /**
+     * Add a target to the given MSHR. This assumes it is in the miss queue.
+     * @param mshr The mshr to add a target to.
+     * @param pkt The target to add.
+     */
+    void addTarget(MSHR *mshr, Packet * &pkt)
+    {
+        fatal("Shouldn't call this on a blocking buffer.");
+    }
+
+    /**
+     * Dummy implementation.
+     */
+    MSHR* allocateTargetList(Addr addr, int asid)
+    {
+        fatal("Unimplemented");
+    }
+};
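+
+// Sketch of the lifecycle the parent cache is expected to drive, inferred
+// from the declarations above (not a verbatim sequence from this tree):
+//     buf.handleMiss(pkt, blkSize, curTick);  // miss detected, cache blocks
+//     Packet *req = buf.getPacket();          // when the bus is granted
+//     buf.markInService(req);                 // request now on the bus
+//     buf.handleResponse(resp, curTick);      // response frees the buffer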
+
+#endif // __BLOCKING_BUFFER_HH__
diff --git a/src/mem/cache/miss/miss_queue.cc b/src/mem/cache/miss/miss_queue.cc
new file mode 100644 (file)
index 0000000..7902fbc
--- /dev/null
@@ -0,0 +1,736 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Miss and writeback queue definitions.
+ */
+
+#include "cpu/exec_context.hh"
+#include "cpu/smt.hh" //for maxThreadsPerCPU
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/miss/miss_queue.hh"
+#include "mem/cache/prefetch/base_prefetcher.hh"
+
+using namespace std;
+
+// simple constructor
+/**
+ * @todo Remove the +16 from the write buffer constructor once we handle
+ * stalling on writebacks due to compression writes.
+ */
+MissQueue::MissQueue(int numMSHRs, int numTargets, int write_buffers,
+                     bool write_allocate, bool prefetch_miss)
+    : mq(numMSHRs, 4), wb(write_buffers, numMSHRs+1000), numMSHR(numMSHRs),
+      numTarget(numTargets), writeBuffers(write_buffers),
+      writeAllocate(write_allocate), order(0), prefetchMiss(prefetch_miss)
+{
+    noTargetMSHR = NULL;
+}
+
+void
+MissQueue::regStats(const string &name)
+{
+    using namespace Stats;
+
+    writebacks
+        .init(maxThreadsPerCPU)
+        .name(name + ".writebacks")
+        .desc("number of writebacks")
+        .flags(total)
+        ;
+
+    // MSHR hit statistics
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        mshr_hits[access_idx]
+            .init(maxThreadsPerCPU)
+            .name(name + "." + cstr + "_mshr_hits")
+            .desc("number of " + cstr + " MSHR hits")
+            .flags(total | nozero | nonan)
+            ;
+    }
+
+    demandMshrHits
+        .name(name + ".demand_mshr_hits")
+        .desc("number of demand (read+write) MSHR hits")
+        .flags(total)
+        ;
+    demandMshrHits = mshr_hits[Read] + mshr_hits[Write];
+
+    overallMshrHits
+        .name(name + ".overall_mshr_hits")
+        .desc("number of overall MSHR hits")
+        .flags(total)
+        ;
+    overallMshrHits = demandMshrHits + mshr_hits[Soft_Prefetch] +
+        mshr_hits[Hard_Prefetch];
+
+    // MSHR miss statistics
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        mshr_misses[access_idx]
+            .init(maxThreadsPerCPU)
+            .name(name + "." + cstr + "_mshr_misses")
+            .desc("number of " + cstr + " MSHR misses")
+            .flags(total | nozero | nonan)
+            ;
+    }
+
+    demandMshrMisses
+        .name(name + ".demand_mshr_misses")
+        .desc("number of demand (read+write) MSHR misses")
+        .flags(total)
+        ;
+    demandMshrMisses = mshr_misses[Read] + mshr_misses[Write];
+
+    overallMshrMisses
+        .name(name + ".overall_mshr_misses")
+        .desc("number of overall MSHR misses")
+        .flags(total)
+        ;
+    overallMshrMisses = demandMshrMisses + mshr_misses[Soft_Prefetch] +
+        mshr_misses[Hard_Prefetch];
+
+    // MSHR miss latency statistics
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        mshr_miss_latency[access_idx]
+            .init(maxThreadsPerCPU)
+            .name(name + "." + cstr + "_mshr_miss_latency")
+            .desc("number of " + cstr + " MSHR miss cycles")
+            .flags(total | nozero | nonan)
+            ;
+    }
+
+    demandMshrMissLatency
+        .name(name + ".demand_mshr_miss_latency")
+        .desc("number of demand (read+write) MSHR miss cycles")
+        .flags(total)
+        ;
+    demandMshrMissLatency = mshr_miss_latency[Read] + mshr_miss_latency[Write];
+
+    overallMshrMissLatency
+        .name(name + ".overall_mshr_miss_latency")
+        .desc("number of overall MSHR miss cycles")
+        .flags(total)
+        ;
+    overallMshrMissLatency = demandMshrMissLatency +
+        mshr_miss_latency[Soft_Prefetch] + mshr_miss_latency[Hard_Prefetch];
+
+    // MSHR uncacheable statistics
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        mshr_uncacheable[access_idx]
+            .init(maxThreadsPerCPU)
+            .name(name + "." + cstr + "_mshr_uncacheable")
+            .desc("number of " + cstr + " MSHR uncacheable")
+            .flags(total | nozero | nonan)
+            ;
+    }
+
+    overallMshrUncacheable
+        .name(name + ".overall_mshr_uncacheable_misses")
+        .desc("number of overall MSHR uncacheable misses")
+        .flags(total)
+        ;
+    overallMshrUncacheable = mshr_uncacheable[Read] + mshr_uncacheable[Write]
+        + mshr_uncacheable[Soft_Prefetch] + mshr_uncacheable[Hard_Prefetch];
+
+    // MSHR uncacheable latency statistics
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        mshr_uncacheable_lat[access_idx]
+            .init(maxThreadsPerCPU)
+            .name(name + "." + cstr + "_mshr_uncacheable_latency")
+            .desc("number of " + cstr + " MSHR uncacheable cycles")
+            .flags(total | nozero | nonan)
+            ;
+    }
+
+    overallMshrUncacheableLatency
+        .name(name + ".overall_mshr_uncacheable_latency")
+        .desc("number of overall MSHR uncacheable cycles")
+        .flags(total)
+        ;
+    overallMshrUncacheableLatency = mshr_uncacheable_lat[Read]
+        + mshr_uncacheable_lat[Write] + mshr_uncacheable_lat[Soft_Prefetch]
+        + mshr_uncacheable_lat[Hard_Prefetch];
+
+#if 0
+    // MSHR access formulas
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        mshrAccesses[access_idx]
+            .name(name + "." + cstr + "_mshr_accesses")
+            .desc("number of " + cstr + " mshr accesses(hits+misses)")
+            .flags(total | nozero | nonan)
+            ;
+        mshrAccesses[access_idx] =
+            mshr_hits[access_idx] + mshr_misses[access_idx]
+            + mshr_uncacheable[access_idx];
+    }
+
+    demandMshrAccesses
+        .name(name + ".demand_mshr_accesses")
+        .desc("number of demand (read+write) mshr accesses")
+        .flags(total | nozero | nonan)
+        ;
+    demandMshrAccesses = demandMshrHits + demandMshrMisses;
+
+    overallMshrAccesses
+        .name(name + ".overall_mshr_accesses")
+        .desc("number of overall (read+write) mshr accesses")
+        .flags(total | nozero | nonan)
+        ;
+    overallMshrAccesses = overallMshrHits + overallMshrMisses
+        + overallMshrUncacheable;
+#endif
+
+    // MSHR miss rate formulas
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        mshrMissRate[access_idx]
+            .name(name + "." + cstr + "_mshr_miss_rate")
+            .desc("mshr miss rate for " + cstr + " accesses")
+            .flags(total | nozero | nonan)
+            ;
+
+        mshrMissRate[access_idx] =
+            mshr_misses[access_idx] / cache->accesses[access_idx];
+    }
+
+    demandMshrMissRate
+        .name(name + ".demand_mshr_miss_rate")
+        .desc("mshr miss rate for demand accesses")
+        .flags(total)
+        ;
+    demandMshrMissRate = demandMshrMisses / cache->demandAccesses;
+
+    overallMshrMissRate
+        .name(name + ".overall_mshr_miss_rate")
+        .desc("mshr miss rate for overall accesses")
+        .flags(total)
+        ;
+    overallMshrMissRate = overallMshrMisses / cache->overallAccesses;
+
+    // mshrMiss latency formulas
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        avgMshrMissLatency[access_idx]
+            .name(name + "." + cstr + "_avg_mshr_miss_latency")
+            .desc("average " + cstr + " mshr miss latency")
+            .flags(total | nozero | nonan)
+            ;
+
+        avgMshrMissLatency[access_idx] =
+            mshr_miss_latency[access_idx] / mshr_misses[access_idx];
+    }
+
+    demandAvgMshrMissLatency
+        .name(name + ".demand_avg_mshr_miss_latency")
+        .desc("average overall mshr miss latency")
+        .flags(total)
+        ;
+    demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
+
+    overallAvgMshrMissLatency
+        .name(name + ".overall_avg_mshr_miss_latency")
+        .desc("average overall mshr miss latency")
+        .flags(total)
+        ;
+    overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
+
+    // mshrUncacheable latency formulas
+    for (int access_idx = 0; access_idx < NUM_MEM_CMDS; ++access_idx) {
+        Packet::Command cmd = (Packet::CommandEnum)access_idx;
+        const string &cstr = cmd.toString();
+
+        avgMshrUncacheableLatency[access_idx]
+            .name(name + "." + cstr + "_avg_mshr_uncacheable_latency")
+            .desc("average " + cstr + " mshr uncacheable latency")
+            .flags(total | nozero | nonan)
+            ;
+
+        avgMshrUncacheableLatency[access_idx] =
+            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
+    }
+
+    overallAvgMshrUncacheableLatency
+        .name(name + ".overall_avg_mshr_uncacheable_latency")
+        .desc("average overall mshr uncacheable latency")
+        .flags(total)
+        ;
+    overallAvgMshrUncacheableLatency = overallMshrUncacheableLatency / overallMshrUncacheable;
+
+    mshr_cap_events
+        .init(maxThreadsPerCPU)
+        .name(name + ".mshr_cap_events")
+        .desc("number of times MSHR cap was activated")
+        .flags(total)
+        ;
+
+    //software prefetching stats
+    soft_prefetch_mshr_full
+        .init(maxThreadsPerCPU)
+        .name(name + ".soft_prefetch_mshr_full")
+        .desc("number of mshr full events for SW prefetching instrutions")
+        .flags(total)
+        ;
+
+    mshr_no_allocate_misses
+        .name(name +".no_allocate_misses")
+        .desc("Number of misses that were no-allocate")
+        ;
+
+}
+
+void
+MissQueue::setCache(BaseCache *_cache)
+{
+    cache = _cache;
+    blkSize = cache->getBlockSize();
+}
+
+void
+MissQueue::setPrefetcher(BasePrefetcher *_prefetcher)
+{
+    prefetcher = _prefetcher;
+}
+
+MSHR*
+MissQueue::allocateMiss(Packet * &pkt, int size, Tick time)
+{
+    MSHR* mshr = mq.allocate(pkt, size);
+    mshr->order = order++;
+    if (!pkt->isUncacheable()) { //&& !pkt->isNoAllocate()
+        // Mark this as a cache line fill
+        mshr->pkt->flags |= CACHE_LINE_FILL;
+    }
+    if (mq.isFull()) {
+        cache->setBlocked(Blocked_NoMSHRs);
+    }
+    if (pkt->cmd != Hard_Prefetch) {
+        //If we need to request the bus (not on HW prefetch), do so
+        cache->setMasterRequest(Request_MSHR, time);
+    }
+    return mshr;
+}
+
+
+MSHR*
+MissQueue::allocateWrite(Packet * &pkt, int size, Tick time)
+{
+    MSHR* mshr = wb.allocate(pkt, pkt->size);
+    mshr->order = order++;
+    if (cache->doData()) {
+        if (pkt->isCompressed()) {
+            delete [] mshr->pkt->data;
+            mshr->pkt->actualSize = pkt->actualSize;
+            mshr->pkt->data = new uint8_t[pkt->actualSize];
+            memcpy(mshr->pkt->data, pkt->data, pkt->actualSize);
+        } else {
+            memcpy(mshr->pkt->data, pkt->data, pkt->size);
+        }
+    }
+    if (wb.isFull()) {
+        cache->setBlocked(Blocked_NoWBBuffers);
+    }
+
+    cache->setMasterRequest(Request_WB, time);
+
+    return mshr;
+}
+
+
+/**
+ * @todo Remove SW prefetches on mshr hits.
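+ *
+ * Dispatch summary: a hit in the MSHRs attaches the request as a new
+ * target and blocks the cache once the target limit is reached; a true
+ * miss allocates a new MSHR; writes that are uncacheable, non-allocating,
+ * or expecting no response are routed through the write buffer instead.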
+ */
+void
+MissQueue::handleMiss(Packet * &pkt, int blkSize, Tick time)
+{
+//    if (!cache->isTopLevel())
+    if (prefetchMiss) prefetcher->handleMiss(pkt, time);
+
+    int size = blkSize;
+    Addr blkAddr = pkt->paddr & ~(Addr)(blkSize-1);
+    MSHR* mshr = NULL;
+    if (!pkt->isUncacheable()) {
+        mshr = mq.findMatch(blkAddr, pkt->req->asid);
+        if (mshr) {
+            //@todo remove hw_pf here
+            mshr_hits[pkt->cmd.toIndex()][pkt->thread_num]++;
+            if (mshr->threadNum != pkt->thread_num) {
+                mshr->threadNum = -1;
+            }
+            mq.allocateTarget(mshr, pkt);
+            if (mshr->pkt->isNoAllocate() && !pkt->isNoAllocate()) {
+                //We are adding an allocate after a no-allocate
+                mshr->pkt->flags &= ~NO_ALLOCATE;
+            }
+            if (mshr->getNumTargets() == numTarget) {
+                noTargetMSHR = mshr;
+                cache->setBlocked(Blocked_NoTargets);
+                mq.moveToFront(mshr);
+            }
+            return;
+        }
+        if (pkt->isNoAllocate()) {
+            //Count no-allocate requests differently
+            mshr_no_allocate_misses++;
+        }
+        else {
+            mshr_misses[pkt->cmd.toIndex()][pkt->thread_num]++;
+        }
+    } else {
+        //Count uncacheable accesses
+        mshr_uncacheable[pkt->cmd.toIndex()][pkt->thread_num]++;
+        size = pkt->size;
+    }
+    if (pkt->cmd.isWrite() && (pkt->isUncacheable() || !writeAllocate ||
+                               pkt->cmd.isNoResponse())) {
+        /**
+         * @todo Add write merging here.
+         */
+        mshr = allocateWrite(pkt, pkt->size, time);
+        return;
+    }
+
+    mshr = allocateMiss(pkt, size, time);
+}
+
+MSHR*
+MissQueue::fetchBlock(Addr addr, int asid, int blk_size, Tick time,
+                      Packet * &target)
+{
+    Addr blkAddr = addr & ~(Addr)(blk_size - 1);
+    assert(mq.findMatch(addr, asid) == NULL);
+    MSHR *mshr = mq.allocateFetch(blkAddr, asid, blk_size, target);
+    mshr->order = order++;
+    mshr->pkt->flags |= CACHE_LINE_FILL;
+    if (mq.isFull()) {
+        cache->setBlocked(Blocked_NoMSHRs);
+    }
+    cache->setMasterRequest(Request_MSHR, time);
+    return mshr;
+}
+
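+/**
+ * Request selection: prefer the head of the miss queue. Fall back to a
+ * pending writeback when the write buffer is full with nothing in
+ * service, the miss queue is empty, or its head is not ready yet,
+ * unless an older overlapping miss must be serviced first. A chosen
+ * miss still yields to an overlapping pending writeback so that dirty
+ * data leaves the cache before the new request is issued. If nothing is
+ * pending and an MSHR slot is free, ask the prefetcher for a packet.
+ */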
+Packet *
+MissQueue::getPacket()
+{
+    Packet * pkt = mq.getReq();
+    if (((wb.isFull() && wb.inServiceMSHRs == 0) || !pkt ||
+         pkt->time > curTick) && wb.havePending()) {
+        pkt = wb.getReq();
+        // Need to search for earlier miss.
+        MSHR *mshr = mq.findPending(pkt);
+        if (mshr && mshr->order < pkt->senderState->order) {
+            // Service misses in order until conflict is cleared.
+            return mq.getReq();
+        }
+    }
+    if (pkt) {
+        MSHR* mshr = wb.findPending(pkt);
+        if (mshr /*&& mshr->order < pkt->senderState->order*/) {
+            // The only way this happens is if we are doing a write and
+            // we didn't have permissions, then subsequently saw a
+            // writeback (an owned block got evicted). We need to perform
+            // the writeback first to preserve the dirty data, then we
+            // can issue the write.
+            return wb.getReq();
+        }
+    }
+    else if (!mq.isFull()) {
+        //If we have a miss queue slot, we can try a prefetch
+        pkt = prefetcher->getPacket();
+        if (pkt) {
+            //Update statistic on number of prefetches issued (hwpf_mshr_misses)
+            mshr_misses[pkt->cmd.toIndex()][pkt->thread_num]++;
+            //It will request the bus for the future, but should clear that immediately
+            allocateMiss(pkt, pkt->size, curTick);
+            pkt = mq.getReq();
+            assert(pkt); //We should get back a req b/c we just put one in
+        }
+    }
+    return pkt;
+}
+
+void
+MissQueue::setBusCmd(Packet * &pkt, Packet::Command cmd)
+{
+    assert(pkt->senderState != 0);
+    MSHR * mshr = pkt->senderState;
+    mshr->originalCmd = pkt->cmd;
+    if (pkt->isCacheFill() || pkt->isNoAllocate())
+        pkt->cmd = cmd;
+}
+
+void
+MissQueue::restoreOrigCmd(Packet * &pkt)
+{
+    pkt->cmd = pkt->senderState->originalCmd;
+}
+
+void
+MissQueue::markInService(Packet * &pkt)
+{
+    assert(pkt->senderState != 0);
+    bool unblock = false;
+    BlockedCause cause = NUM_BLOCKED_CAUSES;
+
+    /**
+     * @todo Should include MSHRQueue pointer in MSHR to select the correct
+     * one.
+     */
+    if ((!pkt->isCacheFill() && pkt->cmd.isWrite()) || pkt->cmd == Copy) {
+        // Forwarding a write/writeback; no need to change the command
+        unblock = wb.isFull();
+        wb.markInService(pkt->senderState);
+        if (!wb.havePending()) {
+            cache->clearMasterRequest(Request_WB);
+        }
+        if (unblock) {
+            // Do we really unblock?
+            unblock = !wb.isFull();
+            cause = Blocked_NoWBBuffers;
+        }
+    } else {
+        unblock = mq.isFull();
+        mq.markInService(pkt->senderState);
+        if (!mq.havePending()) {
+            cache->clearMasterRequest(Request_MSHR);
+        }
+        if (pkt->senderState->originalCmd == Hard_Prefetch) {
+            DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
+                    cache->name());
+            //Also clear pending if need be
+            if (!prefetcher->havePending())
+            {
+                cache->clearMasterRequest(Request_PF);
+            }
+        }
+        if (unblock) {
+            unblock = !mq.isFull();
+            cause = Blocked_NoMSHRs;
+        }
+    }
+    if (unblock) {
+        cache->clearBlocked(cause);
+    }
+}
+
+
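+/**
+ * Response handling: a cacheable line fill records the miss latency and
+ * either resends the MSHR (when targets remain) or frees it; uncacheable
+ * and no-allocate responses copy read data to their targets and respond
+ * directly. Either path may unblock the cache when the queue that held
+ * the request was previously full.
+ */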
+void
+MissQueue::handleResponse(Packet * &pkt, Tick time)
+{
+    MSHR* mshr = pkt->senderState;
+    if (pkt->senderState->originalCmd == Hard_Prefetch) {
+        DPRINTF(HWPrefetch, "%s:Handling the response to a HW_PF\n",
+                cache->name());
+    }
+#ifndef NDEBUG
+    int num_targets = mshr->getNumTargets();
+#endif
+
+    bool unblock = false;
+    bool unblock_target = false;
+    BlockedCause cause = NUM_BLOCKED_CAUSES;
+
+    if (pkt->isCacheFill() && !pkt->isNoAllocate()) {
+        mshr_miss_latency[mshr->originalCmd][pkt->thread_num] +=
+            curTick - pkt->time;
+        // targets were handled in the cache tags
+        if (mshr == noTargetMSHR) {
+            // we always clear at least one target
+            unblock_target = true;
+            cause = Blocked_NoTargets;
+            noTargetMSHR = NULL;
+        }
+
+        if (mshr->hasTargets()) {
+            // Didn't satisfy all the targets, need to resend
+            Packet::Command cmd = mshr->getTarget()->cmd;
+            mq.markPending(mshr, cmd);
+            mshr->order = order++;
+            cache->setMasterRequest(Request_MSHR, time);
+        }
+        else {
+            unblock = mq.isFull();
+            mq.deallocate(mshr);
+            if (unblock) {
+                unblock = !mq.isFull();
+                cause = Blocked_NoMSHRs;
+            }
+        }
+    } else {
+        if (pkt->isUncacheable()) {
+            mshr_uncacheable_lat[pkt->cmd][pkt->thread_num] +=
+                curTick - pkt->time;
+        }
+        if (mshr->hasTargets() && pkt->isUncacheable()) {
+            // Should only have 1 target if we had any
+            assert(num_targets == 1);
+            Packet * target = mshr->getTarget();
+            mshr->popTarget();
+            if (cache->doData() && pkt->cmd.isRead()) {
+                memcpy(target->data, pkt->data, target->size);
+            }
+            cache->respond(target, time);
+            assert(!mshr->hasTargets());
+        }
+        else if (mshr->hasTargets()) {
+            //Must be a no_allocate with possibly more than one target
+            assert(mshr->pkt->isNoAllocate());
+            while (mshr->hasTargets()) {
+                Packet * target = mshr->getTarget();
+                mshr->popTarget();
+                if (cache->doData() && pkt->cmd.isRead()) {
+                    memcpy(target->data, pkt->data, target->size);
+                }
+                cache->respond(target, time);
+            }
+        }
+
+        if (pkt->cmd.isWrite()) {
+            // If the write buffer is full, we might unblock now
+            unblock = wb.isFull();
+            wb.deallocate(mshr);
+            if (unblock) {
+                // Did we really unblock?
+                unblock = !wb.isFull();
+                cause = Blocked_NoWBBuffers;
+            }
+        } else {
+            unblock = mq.isFull();
+            mq.deallocate(mshr);
+            if (unblock) {
+                unblock = !mq.isFull();
+                cause = Blocked_NoMSHRs;
+            }
+        }
+    }
+    if (unblock || unblock_target) {
+        cache->clearBlocked(cause);
+    }
+}
+
+void
+MissQueue::squash(int thread_number)
+{
+    bool unblock = false;
+    BlockedCause cause = NUM_BLOCKED_CAUSES;
+
+    if (noTargetMSHR && noTargetMSHR->threadNum == thread_number) {
+        noTargetMSHR = NULL;
+        unblock = true;
+        cause = Blocked_NoTargets;
+    }
+    if (mq.isFull()) {
+        unblock = true;
+        cause = Blocked_NoMSHRs;
+    }
+    mq.squash(thread_number);
+    if (!mq.havePending()) {
+        cache->clearMasterRequest(Request_MSHR);
+    }
+    if (unblock && !mq.isFull()) {
+        cache->clearBlocked(cause);
+    }
+
+}
+
+MSHR*
+MissQueue::findMSHR(Addr addr, int asid) const
+{
+    return mq.findMatch(addr,asid);
+}
+
+bool
+MissQueue::findWrites(Addr addr, int asid, vector<MSHR*> &writes) const
+{
+    return wb.findMatches(addr,asid,writes);
+}
+
+void
+MissQueue::doWriteback(Addr addr, int asid,
+                       int size, uint8_t *data, bool compressed)
+{
+    // Generate request
+    Packet * pkt = buildWritebackReq(addr, asid, size, data, compressed);
+
+    writebacks[pkt->thread_num]++;
+
+    allocateWrite(pkt, 0, curTick);
+}
+
+
+void
+MissQueue::doWriteback(Packet * &pkt)
+{
+    writebacks[pkt->thread_num]++;
+    allocateWrite(pkt, 0, curTick);
+}
+
+
+MSHR*
+MissQueue::allocateTargetList(Addr addr, int asid)
+{
+    MSHR* mshr = mq.allocateTargetList(addr, asid, blkSize);
+    mshr->pkt->flags |= CACHE_LINE_FILL;
+    if (mq.isFull()) {
+        cache->setBlocked(Blocked_NoMSHRs);
+    }
+    return mshr;
+}
+
+bool
+MissQueue::havePending()
+{
+    return mq.havePending() || wb.havePending() || prefetcher->havePending();
+}
diff --git a/src/mem/cache/miss/miss_queue.hh b/src/mem/cache/miss/miss_queue.hh
new file mode 100644 (file)
index 0000000..ce827fe
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Miss and writeback queue declarations.
+ */
+
+#ifndef __MISS_QUEUE_HH__
+#define __MISS_QUEUE_HH__
+
+#include <vector>
+
+#include "mem/cache/miss/mshr.hh"
+#include "mem/cache/miss/mshr_queue.hh"
+#include "base/statistics.hh"
+
+class BaseCache;
+class BasePrefetcher;
+/**
+ * Manages cache misses and writebacks. Contains MSHRs to store miss data
+ * and the writebuffer for writes/writebacks.
+ * @todo need to handle data on writes better (encapsulate).
+ * @todo need to make replacements/writebacks happen in Cache::access
+ */
+class MissQueue
+{
+  protected:
+    /** The MSHRs. */
+    MSHRQueue mq;
+    /** Write Buffer. */
+    MSHRQueue wb;
+
+    // PARAMETERS
+
+    /** The number of MSHRs in the miss queue. */
+    const int numMSHR;
+    /** The number of targets for each MSHR. */
+    const int numTarget;
+    /** The number of write buffers. */
+    const int writeBuffers;
+    /** True if the cache should allocate on a write miss. */
+    const bool writeAllocate;
+    /** Pointer to the parent cache. */
+    BaseCache* cache;
+
+    /** The Prefetcher */
+    BasePrefetcher *prefetcher;
+
+    /** The block size of the parent cache. */
+    int blkSize;
+
+    /** Increasing order number assigned to each incoming request. */
+    uint64_t order;
+
+    /** True if the prefetcher should be notified of misses. */
+    bool prefetchMiss;
+
+    // Statistics
+    /**
+     * @addtogroup CacheStatistics
+     * @{
+     */
+    /** Number of blocks written back per thread. */
+    Stats::Vector<> writebacks;
+
+    /** Number of misses that hit in the MSHRs, per command and thread. */
+    Stats::Vector<> mshr_hits[NUM_MEM_CMDS];
+    /** Demand misses that hit in the MSHRs. */
+    Stats::Formula demandMshrHits;
+    /** Total number of misses that hit in the MSHRs. */
+    Stats::Formula overallMshrHits;
+
+    /** Number of misses that miss in the MSHRs, per command and thread. */
+    Stats::Vector<> mshr_misses[NUM_MEM_CMDS];
+    /** Demand misses that miss in the MSHRs. */
+    Stats::Formula demandMshrMisses;
+    /** Total number of misses that miss in the MSHRs. */
+    Stats::Formula overallMshrMisses;
+
+    /** Number of uncacheable accesses, per command and thread. */
+    Stats::Vector<> mshr_uncacheable[NUM_MEM_CMDS];
+    /** Total number of uncacheable accesses. */
+    Stats::Formula overallMshrUncacheable;
+
+    /** Total cycle latency of each MSHR miss, per command and thread. */
+    Stats::Vector<> mshr_miss_latency[NUM_MEM_CMDS];
+    /** Total cycle latency of demand MSHR misses. */
+    Stats::Formula demandMshrMissLatency;
+    /** Total cycle latency of overall MSHR misses. */
+    Stats::Formula overallMshrMissLatency;
+
+    /** Total cycle latency of each uncacheable access, per command and thread. */
+    Stats::Vector<> mshr_uncacheable_lat[NUM_MEM_CMDS];
+    /** Total cycle latency of overall uncacheable accesses. */
+    Stats::Formula overallMshrUncacheableLatency;
+
+    /** The total number of MSHR accesses per command and thread. */
+    Stats::Formula mshrAccesses[NUM_MEM_CMDS];
+    /** The total number of demand MSHR accesses. */
+    Stats::Formula demandMshrAccesses;
+    /** The total number of MSHR accesses. */
+    Stats::Formula overallMshrAccesses;
+
+    /** The miss rate in the MSHRs, per command and thread. */
+    Stats::Formula mshrMissRate[NUM_MEM_CMDS];
+    /** The demand miss rate in the MSHRs. */
+    Stats::Formula demandMshrMissRate;
+    /** The overall miss rate in the MSHRs. */
+    Stats::Formula overallMshrMissRate;
+
+    /** The average latency of an MSHR miss, per command and thread. */
+    Stats::Formula avgMshrMissLatency[NUM_MEM_CMDS];
+    /** The average latency of a demand MSHR miss. */
+    Stats::Formula demandAvgMshrMissLatency;
+    /** The average overall latency of an MSHR miss. */
+    Stats::Formula overallAvgMshrMissLatency;
+
+    /** The average latency of an uncacheable access, per command and thread. */
+    Stats::Formula avgMshrUncacheableLatency[NUM_MEM_CMDS];
+    /** The average overall latency of an uncacheable access. */
+    Stats::Formula overallAvgMshrUncacheableLatency;
+
+    /** The number of times a thread hit its MSHR cap. */
+    Stats::Vector<> mshr_cap_events;
+    /** The number of times software prefetches caused the MSHR to block. */
+    Stats::Vector<> soft_prefetch_mshr_full;
+
+    Stats::Scalar<> mshr_no_allocate_misses;
+
+    /**
+     * @}
+     */
+
+  private:
+    /** Pointer to the MSHR that has no targets. */
+    MSHR* noTargetMSHR;
+
+    /**
+     * Allocate a new MSHR to handle the provided miss.
+     * @param pkt The miss to buffer.
+     * @param size The number of bytes to fetch.
+     * @param time The time the miss occurs.
+     * @return A pointer to the new MSHR.
+     */
+    MSHR* allocateMiss(Packet * &pkt, int size, Tick time);
+
+    /**
+     * Allocate a new WriteBuffer to handle the provided write.
+     * @param pkt The write to handle.
+     * @param size The number of bytes to write.
+     * @param time The time the write occurs.
+     * @return A pointer to the new write buffer.
+     */
+    MSHR* allocateWrite(Packet * &pkt, int size, Tick time);
+
+  public:
+    /**
+     * Simple Constructor. Initializes all needed internal storage and sets
+     * parameters.
+     * @param numMSHRs The number of outstanding misses to handle.
+     * @param numTargets The number of outstanding targets to each miss.
+     * @param write_buffers The number of outstanding writes to handle.
+     * @param write_allocate If true, treat write misses the same as reads.
+     */
+    MissQueue(int numMSHRs, int numTargets, int write_buffers,
+              bool write_allocate, bool prefetch_miss);
+
+    /**
+     * Deletes all allocated internal storage.
+     */
+    ~MissQueue();
+
+    /**
+     * Register statistics for this object.
+     * @param name The name of the parent cache.
+     */
+    void regStats(const std::string &name);
+
+    /**
+     * Called by the parent cache to set the back pointer.
+     * @param _cache A pointer to the parent cache.
+     */
+    void setCache(BaseCache *_cache);
+
+    void setPrefetcher(BasePrefetcher *_prefetcher);
+
+    /**
+     * Handle a cache miss properly. Either allocate an MSHR for the request,
+     * or forward it through the write buffer.
+     * @param pkt The request that missed in the cache.
+     * @param blk_size The block size of the cache.
+     * @param time The time the miss is detected.
+     */
+    void handleMiss(Packet * &pkt, int blk_size, Tick time);
+
+    /**
+     * Fetch the block for the given address and buffer the given target.
+     * @param addr The address to fetch.
+     * @param asid The address space of the address.
+     * @param blk_size The block size of the cache.
+     * @param time The time the miss is detected.
+     * @param target The target for the fetch.
+     */
+    MSHR* fetchBlock(Addr addr, int asid, int blk_size, Tick time,
+                     Packet * &target);
+
+    /**
+     * Selects an outstanding request to service.
+     * @return The request to service, NULL if none found.
+     */
+    Packet * getPacket();
+
+    /**
+     * Set the command to the given bus command.
+     * @param pkt The request to update.
+     * @param cmd The bus command to use.
+     */
+    void setBusCmd(Packet * &pkt, Packet::Command cmd);
+
+    /**
+     * Restore the original command in case of a bus transmission error.
+     * @param pkt The request to reset.
+     */
+    void restoreOrigCmd(Packet * &pkt);
+
+    /**
+     * Marks a request as in service (sent on the bus). This can have side
+     * effect since storage for no response commands is deallocated once they
+     * are successfully sent.
+     * @param pkt The request that was sent on the bus.
+     */
+    void markInService(Packet * &pkt);
+
+    /**
+     * Collect statistics and free resources of a satisfied request.
+     * @param pkt The request that has been satisfied.
+     * @param time The time when the request is satisfied.
+     */
+    void handleResponse(Packet * &pkt, Tick time);
+
+    /**
+     * Removes all outstanding requests for a given thread number. If a request
+     * has been sent to the bus, this function removes all of its targets.
+     * @param thread_number The thread number of the requests to squash.
+     */
+    void squash(int thread_number);
+
+    /**
+     * Return the current number of outstanding miss targets.
+     * @return The number of allocated targets in the miss queue.
+     */
+    int getMisses()
+    {
+        return mq.getAllocatedTargets();
+    }
+
+    /**
+     * Searches for the supplied address in the miss queue.
+     * @param addr The address to look for.
+     * @param asid The address space id.
+     * @return The MSHR that contains the address, NULL if not found.
+     * @warning Currently only searches the miss queue. If the cache is not
+     * write-allocate, this may also need to search the write buffer for coherence.
+     */
+    MSHR* findMSHR(Addr addr, int asid) const;
+
+    /**
+     * Searches for the supplied address in the write buffer.
+     * @param addr The address to look for.
+     * @param asid The address space id.
+     * @param writes The list of writes that match the address.
+     * @return True if any writes are found
+     */
+    bool findWrites(Addr addr, int asid, std::vector<MSHR*>& writes) const;
+
+    /**
+     * Perform a writeback of dirty data to the given address.
+     * @param addr The address to write to.
+     * @param asid The address space id.
+     * @param size The number of bytes to write.
+     * @param data The data to write, can be NULL.
+     * @param compressed True if the data is compressed.
+     */
+    void doWriteback(Addr addr, int asid,
+                     int size, uint8_t *data, bool compressed);
+
+    /**
+     * Perform the given writeback request.
+     * @param req The writeback request.
+     */
+    void doWriteback(Packet * &pkt);
+
+    /**
+     * Returns true if there are outstanding requests.
+     * @return True if there are outstanding requests.
+     */
+    bool havePending();
+
+    /**
+     * Add a target to the given MSHR. This assumes it is in the miss queue.
+     * @param mshr The mshr to add a target to.
+     * @param pkt The target to add.
+     */
+    void addTarget(MSHR *mshr, Packet * &pkt)
+    {
+        mq.allocateTarget(mshr, pkt);
+    }
+
+    /**
+     * Allocate an MSHR to hold a list of targets to a block involved in a copy.
+     * If the block is marked done then the MSHR already holds the data to
+     * fill the block. Otherwise the block needs to be fetched.
+     * @param addr The address to buffer.
+     * @param asid The address space ID.
+     * @return A pointer to the allocated MSHR.
+     */
+    MSHR* allocateTargetList(Addr addr, int asid);
+
+};
+
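+// A minimal usage sketch, illustrative only: the parent cache performs this
+// wiring internally, and 'cachePtr', 'prefetcherPtr', and 'pkt' are assumed
+// to exist.
+//
+//     MissQueue mq(8, 4, 8, true, true); // 8 MSHRs, 4 targets each,
+//                                        // 8 write buffers, write-allocate,
+//                                        // prefetch on miss
+//     mq.setCache(cachePtr);
+//     mq.setPrefetcher(prefetcherPtr);
+//     mq.handleMiss(pkt, cachePtr->getBlockSize(), curTick);
+//     Packet *next = mq.getPacket();     // next request to send on the bus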
+#endif //__MISS_QUEUE_HH__
diff --git a/src/mem/cache/miss/mshr.cc b/src/mem/cache/miss/mshr.cc
new file mode 100644 (file)
index 0000000..73aeaf6
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Dave Greene
+ */
+
+/**
+ * @file
+ * Miss Status and Handling Register (MSHR) definitions.
+ */
+
+#include <assert.h>
+#include <string>
+#include <vector>
+
+#include "mem/cache/miss/mshr.hh"
+#include "sim/root.hh" // for curTick
+#include "sim/host.hh"
+#include "base/misc.hh"
+#include "mem/cache/cache.hh"
+
+using namespace std;
+
+MSHR::MSHR()
+{
+    inService = false;
+    ntargets = 0;
+    threadNum = -1;
+}
+
+void
+MSHR::allocate(Packet::Command cmd, Addr _addr, int _asid, int size,
+               Packet * &target)
+{
+    assert(targets.empty());
+    addr = _addr;
+    asid = _asid;
+
+    pkt = new Packet(); // allocate new memory request
+    pkt->addr = addr; //picked physical address for now
+    pkt->cmd = cmd;
+    pkt->size = size;
+    pkt->data = new uint8_t[size];
+    pkt->senderState = this;
+    //Set the time here for latency calculations
+    //pkt->time = curTick;
+
+    if (target) {
+        pkt->req = target->req;
+        allocateTarget(target);
+    }
+}
+
+// Since we aren't sure if data is being used, don't copy here.
+/**
+ * @todo When we have a "global" data flag, might want to copy data here.
+ */
+void
+MSHR::allocateAsBuffer(Packet * &target)
+{
+    addr = target->paddr;
+    asid = target->req->asid;
+    threadNum = target->thread_num;
+    pkt = new Packet();
+    pkt->addr = target->addr;
+    pkt->dest = target->dest;
+    pkt->cmd = target->cmd;
+    pkt->size = target->size;
+    pkt->req = target->req;
+    pkt->data = new uint8_t[target->size];
+    pkt->senderState = this;
+}
+
+void
+MSHR::deallocate()
+{
+    assert(targets.empty());
+    assert(ntargets == 0);
+    pkt = NULL;
+    inService = false;
+    allocIter = NULL;
+    readyIter = NULL;
+}
+
+/*
+ * Adds a target to an MSHR
+ */
+void
+MSHR::allocateTarget(Packet * &target)
+{
+    //If we append an invalidate and we issued a read to the bus,
+    //but now have some pending writes, we need to move
+    //the invalidate to before the first non-read
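+    //For example (R = read, W = write, Inv = invalidate):
+    //  targets [R1, R2, W1, R3] + Inv  ==>  [R1, R2, Inv, W1, R3]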
+    if (inService && pkt->cmd.isRead() && target->cmd.isInvalidate()) {
+        std::list<Packet *> temp;
+
+        while (!targets.empty()) {
+            if (!targets.front()->cmd.isRead()) break;
+            //Place on top of temp stack
+            temp.push_front(targets.front());
+            //Remove from targets
+            targets.pop_front();
+        }
+
+        //Now that we have all the reads off until first non-read, we can
+        //place the invalidate on
+        targets.push_front(target);
+
+        //Now we pop off the temp_stack and put them back
+        while (!temp.empty()) {
+            targets.push_front(temp.front());
+            temp.pop_front();
+        }
+    }
+    else {
+        targets.push_back(target);
+    }
+
+    ++ntargets;
+    assert(targets.size() == ntargets);
+    /**
+     * @todo really prioritize the target commands.
+     */
+
+    if (!inService && target->cmd.isWrite()) {
+        pkt->cmd = WriteReq;
+    }
+}
+
+
+
+void
+MSHR::dump()
+{
+    ccprintf(cerr,
+             "inService: %d thread: %d\n"
+             "Addr: %x asid: %d ntargets %d\n"
+             "Targets:\n",
+             inService, threadNum, addr, asid, ntargets);
+
+    TargetListIterator tar_it = targets.begin();
+    for (int i = 0; i < ntargets; i++) {
+        assert(tar_it != targets.end());
+
+        ccprintf(cerr, "\t%d: Addr: %x cmd: %d\n",
+                 i, (*tar_it)->paddr, (*tar_it)->cmd.toIndex());
+
+        tar_it++;
+    }
+    ccprintf(cerr, "\n");
+}
+
+MSHR::~MSHR()
+{
+    if (pkt)
+        pkt = NULL;
+}
diff --git a/src/mem/cache/miss/mshr.hh b/src/mem/cache/miss/mshr.hh
new file mode 100644 (file)
index 0000000..167aa26
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Miss Status and Handling Register (MSHR) declaration.
+ */
+
+#ifndef __MSHR_HH__
+#define __MSHR_HH__
+
+#include "mem/packet.hh"
+#include <list>
+#include <deque>
+
+class MSHR;
+
+/**
+ * Miss Status and handling Register. This class keeps all the information
+ * needed to handle a cache miss including a list of target requests.
+ */
+class MSHR {
+  public:
+    /** Defines the Data structure of the MSHR targetlist. */
+    typedef std::list<Packet *> TargetList;
+    /** Target list iterator. */
+    typedef std::list<Packet *>::iterator TargetListIterator;
+    /** A list of MSHRs. */
+    typedef std::list<MSHR *> List;
+    /** MSHR list iterator. */
+    typedef List::iterator Iterator;
+    /** MSHR list const_iterator. */
+    typedef List::const_iterator ConstIterator;
+
+    /** Address of the miss. */
+    Addr addr;
+    /** Address space id of the miss. */
+    short asid;
+    /** True if the request has been sent to the bus. */
+    bool inService;
+    /** Thread number of the miss. */
+    int threadNum;
+    /** The request that is forwarded to the next level of the hierarchy. */
+    Packet * pkt;
+    /** The number of currently allocated targets. */
+    short ntargets;
+    /** The original requesting command. */
+    Packet::Command originalCmd;
+    /** Order number assigned by the miss queue. */
+    uint64_t order;
+
+    /**
+     * Pointer to this MSHR on the ready list.
+     * @sa MissQueue, MSHRQueue::readyList
+     */
+    Iterator readyIter;
+    /**
+     * Pointer to this MSHR on the allocated list.
+     * @sa MissQueue, MSHRQueue::allocatedList
+     */
+    Iterator allocIter;
+
+  private:
+    /** List of all requests that match the address */
+    TargetList targets;
+
+  public:
+    /**
+     * Allocate a miss to this MSHR.
+     * @param cmd The requesting command.
+     * @param addr The address of the miss.
+     * @param asid The address space id of the miss.
+     * @param size The number of bytes to request.
+     * @param pkt The original miss.
+     */
+    void allocate(Packet::Command cmd, Addr addr, int asid, int size,
+                  Packet * &pkt);
+
+    /**
+     * Allocate this MSHR as a buffer for the given request.
+     * @param target The memory request to buffer.
+     */
+    void allocateAsBuffer(Packet * &target);
+
+    /**
+     * Mark this MSHR as free.
+     */
+    void deallocate();
+
+    /**
+     * Add a request to the list of targets.
+     * @param target The target.
+     */
+    void allocateTarget(Packet * &target);
+
+    /** A simple constructor. */
+    MSHR();
+    /** A simple destructor. */
+    ~MSHR();
+
+    /**
+     * Returns the current number of allocated targets.
+     * @return The current number of allocated targets.
+     */
+    int getNumTargets()
+    {
+        return ntargets;
+    }
+
+    /**
+     * Returns a pointer to the target list.
+     * @return a pointer to the target list.
+     */
+    TargetList* getTargetList()
+    {
+        return &targets;
+    }
+
+    /**
+     * Returns a pointer to the first target.
+     * @return A pointer to the first target.
+     */
+    Packet * getTarget()
+    {
+        return targets.front();
+    }
+
+    /**
+     * Pop first target.
+     */
+    void popTarget()
+    {
+        --ntargets;
+        targets.pop_front();
+    }
+
+    /**
+     * Returns true if there are targets left.
+     * @return true if there are targets
+     */
+    bool hasTargets()
+    {
+        return !targets.empty();
+    }
+
+    /**
+     * Prints the contents of this MSHR to stderr.
+     */
+    void dump();
+};
+
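+// Target-list sketch, illustrative only: 'miss' is an assumed Packet
+// pointer, and 'blkAddr', 'asid', and 'blkSize' stand in for values the
+// cache would supply.
+//
+//     MSHR m;
+//     m.allocate(Read, blkAddr, asid, blkSize, miss); // miss = 1st target
+//     while (m.hasTargets()) {
+//         Packet *target = m.getTarget(); // oldest outstanding target
+//         // ... satisfy the target, then:
+//         m.popTarget();
+//     }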
+#endif //__MSHR_HH__
diff --git a/src/mem/cache/miss/mshr_queue.cc b/src/mem/cache/miss/mshr_queue.cc
new file mode 100644 (file)
index 0000000..72c8cc4
--- /dev/null
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/** @file
+ * Definition of the MSHRQueue.
+ */
+
+#include "mem/cache/miss/mshr_queue.hh"
+#include "sim/eventq.hh"
+
+using namespace std;
+
+MSHRQueue::MSHRQueue(int num_mshrs, int reserve)
+    : numMSHRs(num_mshrs + reserve - 1), numReserve(reserve)
+{
+    allocated = 0;
+    inServiceMSHRs = 0;
+    allocatedTargets = 0;
+    registers = new MSHR[numMSHRs];
+    for (int i = 0; i < numMSHRs; ++i) {
+        freeList.push_back(&registers[i]);
+    }
+}
+
+MSHRQueue::~MSHRQueue()
+{
+    delete [] registers;
+}
+
+MSHR*
+MSHRQueue::findMatch(Addr addr, int asid) const
+{
+    MSHR::ConstIterator i = allocatedList.begin();
+    MSHR::ConstIterator end = allocatedList.end();
+    for (; i != end; ++i) {
+        MSHR *mshr = *i;
+        if (mshr->addr == addr) {
+            return mshr;
+        }
+    }
+    return NULL;
+}
+
+bool
+MSHRQueue::findMatches(Addr addr, int asid, vector<MSHR*>& matches) const
+{
+    // Need an empty vector
+    assert(matches.empty());
+    bool retval = false;
+    MSHR::ConstIterator i = allocatedList.begin();
+    MSHR::ConstIterator end = allocatedList.end();
+    for (; i != end; ++i) {
+        MSHR *mshr = *i;
+        if (mshr->addr == addr) {
+            retval = true;
+            matches.push_back(mshr);
+        }
+    }
+    return retval;
+
+}
+
+MSHR*
+MSHRQueue::findPending(Packet * &pkt) const
+{
+    MSHR::ConstIterator i = pendingList.begin();
+    MSHR::ConstIterator end = pendingList.end();
+    for (; i != end; ++i) {
+        MSHR *mshr = *i;
+        if (mshr->addr < pkt->addr) {
+            if (mshr->addr + mshr->pkt->size > pkt->addr) {
+                return mshr;
+            }
+        } else {
+            if (pkt->addr + pkt->size > mshr->addr) {
+                return mshr;
+            }
+        }
+
+        //need to check destination address for copies.
+        if (mshr->pkt->cmd == Copy) {
+            Addr dest = mshr->pkt->dest;
+            if (dest < pkt->addr) {
+                if (dest + mshr->pkt->size > pkt->addr) {
+                    return mshr;
+                }
+            } else {
+                if (pkt->addr + pkt->size > dest) {
+                    return mshr;
+                }
+            }
+        }
+    }
+    return NULL;
+}
+
+MSHR*
+MSHRQueue::allocate(Packet * &pkt, int size)
+{
+    Addr aligned_addr = pkt->addr & ~((Addr)size - 1);
+    MSHR *mshr = freeList.front();
+    assert(mshr->getNumTargets() == 0);
+    freeList.pop_front();
+
+    if (pkt->cmd.isNoResponse()) {
+        mshr->allocateAsBuffer(pkt);
+    } else {
+        assert(size != 0);
+        mshr->allocate(pkt->cmd, aligned_addr, pkt->req->asid, size, pkt);
+        allocatedTargets += 1;
+    }
+    mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
+    mshr->readyIter = pendingList.insert(pendingList.end(), mshr);
+
+    allocated += 1;
+    return mshr;
+}
+
+MSHR*
+MSHRQueue::allocateFetch(Addr addr, int asid, int size, Packet * &target)
+{
+    MSHR *mshr = freeList.front();
+    assert(mshr->getNumTargets() == 0);
+    freeList.pop_front();
+    mshr->allocate(Read, addr, asid, size, target);
+    mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
+    mshr->readyIter = pendingList.insert(pendingList.end(), mshr);
+
+    allocated += 1;
+    return mshr;
+}
+
+MSHR*
+MSHRQueue::allocateTargetList(Addr addr, int asid, int size)
+{
+    MSHR *mshr = freeList.front();
+    assert(mshr->getNumTargets() == 0);
+    freeList.pop_front();
+    Packet * dummy = NULL; // no initial target for a target-list MSHR
+    mshr->allocate(Read, addr, asid, size, dummy);
+    mshr->allocIter = allocatedList.insert(allocatedList.end(), mshr);
+    mshr->inService = true;
+    ++inServiceMSHRs;
+    ++allocated;
+    return mshr;
+}
+
+
+void
+MSHRQueue::deallocate(MSHR* mshr)
+{
+    deallocateOne(mshr);
+}
+
+MSHR::Iterator
+MSHRQueue::deallocateOne(MSHR* mshr)
+{
+    MSHR::Iterator retval = allocatedList.erase(mshr->allocIter);
+    freeList.push_front(mshr);
+    allocated--;
+    allocatedTargets -= mshr->getNumTargets();
+    if (mshr->inService) {
+        inServiceMSHRs--;
+    } else {
+        pendingList.erase(mshr->readyIter);
+    }
+    mshr->deallocate();
+    return retval;
+}
+
+void
+MSHRQueue::moveToFront(MSHR *mshr)
+{
+    if (!mshr->inService) {
+        assert(mshr == *(mshr->readyIter));
+        pendingList.erase(mshr->readyIter);
+        mshr->readyIter = pendingList.insert(pendingList.begin(), mshr);
+    }
+}
+
+void
+MSHRQueue::markInService(MSHR* mshr)
+{
+    //assert(mshr == pendingList.front());
+    if (mshr->pkt->cmd.isNoResponse()) {
+        assert(mshr->getNumTargets() == 0);
+        deallocate(mshr);
+        return;
+    }
+    mshr->inService = true;
+    pendingList.erase(mshr->readyIter);
+    mshr->readyIter = NULL;
+    inServiceMSHRs += 1;
+    //pendingList.pop_front();
+}
+
+void
+MSHRQueue::markPending(MSHR* mshr, Packet::Command cmd)
+{
+    assert(mshr->readyIter == NULL);
+    mshr->pkt->cmd = cmd;
+    mshr->pkt->flags &= ~SATISFIED;
+    mshr->inService = false;
+    --inServiceMSHRs;
+    /**
+     * @todo might want to add rerequests to front of pending list for
+     * performance.
+     */
+    mshr->readyIter = pendingList.insert(pendingList.end(), mshr);
+}
+
+void
+MSHRQueue::squash(int thread_number)
+{
+    MSHR::Iterator i = allocatedList.begin();
+    MSHR::Iterator end = allocatedList.end();
+    for (; i != end;) {
+        MSHR *mshr = *i;
+        if (mshr->threadNum == thread_number) {
+            while (mshr->hasTargets()) {
+                Packet * target = mshr->getTarget();
+                mshr->popTarget();
+
+                assert(target->thread_num == thread_number);
+                if (target->completionEvent != NULL) {
+                    delete target->completionEvent;
+                }
+                target = NULL;
+            }
+            assert(!mshr->hasTargets());
+            assert(mshr->ntargets == 0);
+            if (!mshr->inService) {
+                i = deallocateOne(mshr);
+            } else {
+                //mshr->pkt->flags &= ~CACHE_LINE_FILL;
+                ++i;
+            }
+        } else {
+            ++i;
+        }
+    }
+}
diff --git a/src/mem/cache/miss/mshr_queue.hh b/src/mem/cache/miss/mshr_queue.hh
new file mode 100644 (file)
index 0000000..3e1d3f3
--- /dev/null
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/** @file
+ * Declaration of a structure to manage MSHRs.
+ */
+
+#ifndef __MSHR_QUEUE_HH__
+#define __MSHR_QUEUE_HH__
+
+#include <vector>
+#include "mem/cache/miss/mshr.hh"
+
+/**
+ * A Class for maintaining a list of pending and allocated memory requests.
+ */
+class MSHRQueue {
+  private:
+    /**  MSHR storage. */
+    MSHR* registers;
+    /** Holds pointers to all allocated MSHRs. */
+    MSHR::List allocatedList;
+    /** Holds pointers to MSHRs that haven't been sent to the bus. */
+    MSHR::List pendingList;
+    /** Holds non allocated MSHRs. */
+    MSHR::List freeList;
+
+    // Parameters
+    /**
+     * The total number of MSHRs in this queue. This number is set as the
+     * number of MSHRs requested plus (numReserve - 1). This allows for
+     * the same number of effective MSHRs while still maintaining the reserve.
+     */
+    const int numMSHRs;
+
+    /**
+     * The number of MSHRs to hold in reserve. This is needed because copy
+     * operations can allocate upto 4 MSHRs at one time.
+     */
+    const int numReserve;
+
+  public:
+    /** The number of allocated MSHRs. */
+    int allocated;
+    /** The number of MSHRs that have been forwarded to the bus. */
+    int inServiceMSHRs;
+    /** The number of targets waiting for response. */
+    int allocatedTargets;
+
+    /**
+     * Create a queue with a given number of MSHRs.
+     * @param num_mshrs The number of MSHRs in this queue.
+     * @param reserve The minimum number of MSHRs needed to satisfy any access.
+     */
+    MSHRQueue(int num_mshrs, int reserve = 1);
+
+    /** Destructor */
+    ~MSHRQueue();
+
+    /**
+     * Find the first MSHR that matches the provided address and asid.
+     * @param addr The address to find.
+     * @param asid The address space id.
+     * @return Pointer to the matching MSHR, null if not found.
+     */
+    MSHR* findMatch(Addr addr, int asid) const;
+
+    /**
+     * Find and return all the matching MSHRs in the provided vector.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @param matches The vector to return pointers to the matching MSHRs.
+     * @return True if any matches are found, false otherwise.
+     * @todo Typedef the vector??
+     */
+    bool findMatches(Addr addr, int asid, std::vector<MSHR*>& matches) const;
+
+    /**
+     * Find any pending requests that overlap the given request.
+     * @param pkt The request to find.
+     * @return A pointer to the earliest matching MSHR.
+     */
+    MSHR* findPending(Packet * &pkt) const;
+
+    /**
+     * Allocates a new MSHR for the request and size. This places the request
+     * as the first target in the MSHR.
+     * @param pkt The request to handle.
+     * @param size The number of bytes to fetch from memory.
+     * @return A pointer to the allocated MSHR.
+     *
+     * @pre There are free MSHRs.
+     */
+    MSHR* allocate(Packet * &pkt, int size = 0);
+
+    /**
+     * Allocate a read request for the given address, and places the given
+     * target on the target list.
+     * @param addr The address to fetch.
+     * @param asid The address space for the fetch.
+     * @param size The number of bytes to request.
+     * @param target The first target for the request.
+     * @return Pointer to the new MSHR.
+     */
+    MSHR* allocateFetch(Addr addr, int asid, int size, Packet * &target);
+
+    /**
+     * Allocate a target list for the given address.
+     * @param addr The address to fetch.
+     * @param asid The address space for the fetch.
+     * @param size The number of bytes to request.
+     * @return Pointer to the new MSHR.
+     */
+    MSHR* allocateTargetList(Addr addr, int asid, int size);
+
+    /**
+     * Removes the given MSHR from the queue. This places the MSHR on the
+     * free list.
+     * @param mshr
+     */
+    void deallocate(MSHR* mshr);
+
+    /**
+     * Allocates a target to the given MSHR. Used to keep track of the number
+     * of outstanding targets.
+     * @param mshr The MSHR to allocate the target to.
+     * @param pkt The target request.
+     */
+    void allocateTarget(MSHR* mshr, Packet * &pkt)
+    {
+        mshr->allocateTarget(pkt);
+        allocatedTargets += 1;
+    }
+
+    /**
+     * Remove an MSHR from the queue. Returns an iterator into the allocatedList
+     * for faster squash implementation.
+     * @param mshr The MSHR to remove.
+     * @return An iterator to the next entry in the allocatedList.
+     */
+    MSHR::Iterator deallocateOne(MSHR* mshr);
+
+    /**
+     * Moves the MSHR to the front of the pending list if it is not in service.
+     * @param mshr The mshr to move.
+     */
+    void moveToFront(MSHR *mshr);
+
+    /**
+     * Mark the given MSHR as in service. This removes the MSHR from the
+     * pendingList. Deallocates the MSHR if it does not expect a response.
+     * @param mshr The MSHR to mark in service.
+     */
+    void markInService(MSHR* mshr);
+
+    /**
+     * Mark an in-service MSHR as pending, used to resend a request.
+     * @param mshr The MSHR to resend.
+     * @param cmd The command to resend.
+     */
+    void markPending(MSHR* mshr, Packet::Command cmd);
+
+    /**
+     * Squash outstanding requests with the given thread number. If a request
+     * is in service, just squashes the targets.
+     * @param thread_number The thread to squash.
+     */
+    void squash(int thread_number);
+
+    /**
+     * Returns true if the pending list is not empty.
+     * @return True if there are outstanding requests.
+     */
+    bool havePending() const
+    {
+        return !pendingList.empty();
+    }
+
+    /**
+     * Returns true if there are no free MSHRs.
+     * @return True if this queue is full.
+     */
+    bool isFull() const
+    {
+        return (allocated > numMSHRs - numReserve);
+    }
+
+    /**
+     * Returns the request at the head of the pendingList.
+     * @return The next request to service.
+     */
+    Packet * getReq() const
+    {
+        if (pendingList.empty()) {
+            return NULL;
+        }
+        MSHR* mshr = pendingList.front();
+        return mshr->pkt;
+    }
+
+    /**
+     * Returns the number of outstanding targets.
+     * @return the number of allocated targets.
+     */
+    int getAllocatedTargets() const
+    {
+        return allocatedTargets;
+    }
+
+};
+
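+// Sizing sketch: MSHRQueue(8, 4) builds 8 + 4 - 1 = 11 registers, and
+// isFull() returns true once allocated > 11 - 4 = 7. A caller that checks
+// isFull() before allocating therefore gets 8 ordinary allocations, and
+// whenever the queue is not full at least 4 registers remain free for a
+// copy that may claim several MSHRs at once.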
+#endif //__MSHR_QUEUE_HH__
diff --git a/src/mem/cache/prefetch/base_prefetcher.cc b/src/mem/cache/prefetch/base_prefetcher.cc
new file mode 100644 (file)
index 0000000..14beef2
--- /dev/null
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Hardware Prefetcher Definition.
+ */
+
+#include "base/trace.hh"
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/prefetch/base_prefetcher.hh"
+#include <list>
+
+BasePrefetcher::BasePrefetcher(int size, bool pageStop, bool serialSquash,
+                               bool cacheCheckPush, bool onlyData)
+    :size(size), pageStop(pageStop), serialSquash(serialSquash),
+     cacheCheckPush(cacheCheckPush), only_data(onlyData)
+{
+}
+
+void
+BasePrefetcher::setCache(BaseCache *_cache)
+{
+    cache = _cache;
+    blkSize = cache->getBlockSize();
+}
+
+void
+BasePrefetcher::regStats(const std::string &name)
+{
+    pfIdentified
+        .name(name + ".prefetcher.num_hwpf_identified")
+        .desc("number of hwpf identified")
+        ;
+
+    pfMSHRHit
+        .name(name + ".prefetcher.num_hwpf_already_in_mshr")
+        .desc("number of hwpf that were already in mshr")
+        ;
+
+    pfCacheHit
+        .name(name + ".prefetcher.num_hwpf_already_in_cache")
+        .desc("number of hwpf that were already in the cache")
+        ;
+
+    pfBufferHit
+        .name(name + ".prefetcher.num_hwpf_already_in_prefetcher")
+        .desc("number of hwpf that were already in the prefetch queue")
+        ;
+
+    pfRemovedFull
+        .name(name + ".prefetcher.num_hwpf_evicted")
+        .desc("number of hwpf removed due to no buffer left")
+        ;
+
+    pfRemovedMSHR
+        .name(name + ".prefetcher.num_hwpf_removed_MSHR_hit")
+        .desc("number of hwpf removed because MSHR allocated")
+        ;
+
+    pfIssued
+        .name(name + ".prefetcher.num_hwpf_issued")
+        .desc("number of hwpf issued")
+        ;
+
+    pfSpanPage
+        .name(name + ".prefetcher.num_hwpf_span_page")
+        .desc("number of hwpf spanning a virtual page")
+        ;
+
+    pfSquashed
+        .name(name + ".prefetcher.num_hwpf_squashed_from_miss")
+        .desc("number of hwpf that got squashed due to a miss aborting calculation time")
+        ;
+}
+
+Packet *
+BasePrefetcher::getPacket()
+{
+    DPRINTF(HWPrefetch, "%s:Requesting a hw_pf to issue\n", cache->name());
+
+    if (pf.empty()) {
+        DPRINTF(HWPrefetch, "%s:No HW_PF found\n", cache->name());
+        return NULL;
+    }
+
+    Packet * pkt;
+    bool keepTrying = false;
+    do {
+        pkt = *pf.begin();
+        pf.pop_front();
+        if (!cacheCheckPush) {
+            keepTrying = inCache(pkt);
+        }
+        if (pf.empty()) {
+            cache->clearMasterRequest(Request_PF);
+            if (keepTrying) return NULL; //None left, all were in cache
+        }
+    } while (keepTrying);
+
+    pfIssued++;
+    return pkt;
+}
+
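+/**
+ * Miss handling: a demand miss first removes any queued prefetch to the
+ * same block; with serialSquash set, queued prefetches scheduled at or
+ * after the miss time are also squashed. Each candidate the prefetcher
+ * then produces is skipped if it is already in the cache, the miss
+ * queue, or the prefetch queue, and the oldest entry is evicted when
+ * the queue is full.
+ */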
+void
+BasePrefetcher::handleMiss(Packet * &pkt, Tick time)
+{
+    if (!pkt->isUncacheable() && !(pkt->isInstRead() && only_data))
+    {
+        //Calculate the blk address
+        Addr blkAddr = pkt->paddr & ~(Addr)(blkSize-1);
+
+        //Check if miss is in pfq, if so remove it
+        std::list<Packet *>::iterator iter = inPrefetch(blkAddr);
+        if (iter != pf.end()) {
+            DPRINTF(HWPrefetch, "%s:Saw a miss to a queued prefetch, removing it\n", cache->name());
+            pfRemovedMSHR++;
+            pf.erase(iter);
+            if (pf.empty())
+                cache->clearMasterRequest(Request_PF);
+        }
+
+        //Remove any queued prefetches with issue times at or after this
+        //miss. Since the queue is kept in time order, pop from the back
+        //until it is empty or an earlier time is found. This emulates
+        //aborting the in-progress calculation on a new miss, as needed
+        //for serial calculators like the GHB.
+        if (serialSquash) {
+            while (!pf.empty() && pf.back()->time >= time) {
+                pfSquashed++;
+                pf.pop_back();
+            }
+            if (pf.empty())
+                cache->clearMasterRequest(Request_PF);
+        }
+
+
+        std::list<Addr> addresses;
+        std::list<Tick> delays;
+        calculatePrefetch(pkt, addresses, delays);
+
+        std::list<Addr>::iterator addr = addresses.begin();
+        std::list<Tick>::iterator delay = delays.begin();
+        while (addr != addresses.end())
+        {
+            DPRINTF(HWPrefetch, "%s:Found a pf canidate, inserting into prefetch queue\n", cache->name());
+            //temp calc this here...
+            pfIdentified++;
+            //create a prefetch memreq
+            Packet * prefetch;
+            prefetch = new Packet();
+            prefetch->paddr = (*addr);
+            prefetch->size = blkSize;
+            prefetch->cmd = Hard_Prefetch;
+            prefetch->xc = pkt->xc;
+            prefetch->data = new uint8_t[blkSize];
+            prefetch->req->asid = pkt->req->asid;
+            prefetch->thread_num = pkt->thread_num;
+            prefetch->time = time + (*delay); //@todo ADD LATENCY HERE
+            //... initialize
+
+            //Check if it is already in the cache
+            if (cacheCheckPush) {
+                if (inCache(prefetch)) {
+                    addr++;
+                    delay++;
+                    continue;
+                }
+            }
+
+            //Check if it is already in the miss_queue
+            if (inMissQueue(prefetch->paddr, prefetch->req->asid)) {
+                addr++;
+                delay++;
+                continue;
+            }
+
+            //Check if it is already in the pf buffer
+            if (inPrefetch(prefetch->paddr) != pf.end()) {
+                pfBufferHit++;
+                addr++;
+                delay++;
+                continue;
+            }
+
+            //We just remove the head if we are full
+            if (pf.size() == size)
+            {
+                DPRINTF(HWPrefetch, "%s:Prefetch queue full, removing the oldest entry\n", cache->name());
+                pfRemovedFull++;
+                pf.pop_front();
+            }
+
+            pf.push_back(prefetch);
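+            // Mark the packet as a cache line fill (presumably so the
+            // response is handled like a demand miss fill).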
+            prefetch->flags |= CACHE_LINE_FILL;
+
+            //Make sure to request the bus, with proper delay
+            cache->setMasterRequest(Request_PF, prefetch->time);
+
+            //Increment through the list
+            addr++;
+            delay++;
+        }
+    }
+}
+
+std::list<Packet *>::iterator
+BasePrefetcher::inPrefetch(Addr address)
+{
+    //Guaranteed to be at most one match, since we always check before inserting
+    std::list<Packet *>::iterator iter;
+    for (iter=pf.begin(); iter != pf.end(); iter++) {
+        if (((*iter)->paddr & ~(Addr)(blkSize-1)) == address) {
+            return iter;
+        }
+    }
+    return pf.end();
+}
+
+
diff --git a/src/mem/cache/prefetch/base_prefetcher.hh b/src/mem/cache/prefetch/base_prefetcher.hh
new file mode 100644 (file)
index 0000000..3e4fc89
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Base hardware prefetcher declarations.
+ */
+
+#ifndef __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
+#define __MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
+
+#include "mem/packet.hh"
+#include <list>
+
+class BaseCache;
+class BasePrefetcher
+{
+  protected:
+
+    /** The Prefetch Queue. */
+    std::list<Packet *> pf;
+
+    // PARAMETERS
+
+    /** The maximum number of prefetches held in the prefetch queue. */
+    const int size;
+
+    /** Pointer to the parent cache. */
+    BaseCache* cache;
+
+    /** The block size of the parent cache. */
+    int blkSize;
+
+    /** Do we stop prefetches at page boundaries? */
+    bool pageStop;
+
+    /** Do we remove prefetches with later times than a new miss.*/
+    bool serialSquash;
+
+    /** Do we check the cache for a hit when inserting into the buffer
+        (push), or only when removing an entry to issue it? */
+    bool cacheCheckPush;
+
+    /** Do we prefetch on only data reads, or on inst reads as well. */
+    bool only_data;
+
+  public:
+
+    Stats::Scalar<> pfIdentified;
+    Stats::Scalar<> pfMSHRHit;
+    Stats::Scalar<> pfCacheHit;
+    Stats::Scalar<> pfBufferHit;
+    Stats::Scalar<> pfRemovedFull;
+    Stats::Scalar<> pfRemovedMSHR;
+    Stats::Scalar<> pfIssued;
+    Stats::Scalar<> pfSpanPage;
+    Stats::Scalar<> pfSquashed;
+
+    void regStats(const std::string &name);
+
+  public:
+    BasePrefetcher(int numMSHRS, bool pageStop, bool serialSquash,
+                   bool cacheCheckPush, bool onlyData);
+
+    virtual ~BasePrefetcher() {}
+
+    void setCache(BaseCache *_cache);
+
+    void handleMiss(Packet * &pkt, Tick time);
+
+    Packet * getPacket();
+
+    bool havePending()
+    {
+        return !pf.empty();
+    }
+
+    virtual void calculatePrefetch(Packet * &pkt,
+                                   std::list<Addr> &addresses,
+                                   std::list<Tick> &delays) = 0;
+
+    virtual bool inCache(Packet * &pkt) = 0;
+
+    virtual bool inMissQueue(Addr address, int asid) = 0;
+
+    std::list<Packet *>::iterator inPrefetch(Addr address);
+};
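+
+// A minimal sketch of a concrete subclass (illustrative only; a real
+// subclass must also implement the pure virtual inCache/inMissQueue
+// hooks). On each miss it simply queues the next sequential block:
+//
+//     class NextLineSketch : public BasePrefetcher
+//     {
+//       public:
+//         NextLineSketch(int size)
+//             : BasePrefetcher(size, false, false, false, true) {}
+//
+//         void calculatePrefetch(Packet * &pkt, std::list<Addr> &addresses,
+//                                std::list<Tick> &delays)
+//         {
+//             Addr blkAddr = pkt->paddr & ~(Addr)(blkSize - 1);
+//             addresses.push_back(blkAddr + blkSize);
+//             delays.push_back(0);
+//         }
+//     };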
+
+
+#endif //__MEM_CACHE_PREFETCH_BASE_PREFETCHER_HH__
diff --git a/src/mem/cache/prefetch/ghb_prefetcher.cc b/src/mem/cache/prefetch/ghb_prefetcher.cc
new file mode 100644 (file)
index 0000000..247ec6e
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ *          Steve Reinhardt
+ */
+
+/**
+ * @file
+ * GHB Prefetcher template instantiations.
+ */
+
+#include "mem/cache/tags/cache_tags.hh"
+
+#include "mem/cache/tags/lru.hh"
+
+#include "base/compression/null_compression.hh"
+
+#include "mem/cache/miss/miss_queue.hh"
+#include "mem/cache/miss/blocking_buffer.hh"
+
+#include "mem/cache/prefetch/ghb_prefetcher.hh"
+
+// Template Instantiations
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+template class GHBPrefetcher<CacheTags<LRU,NullCompression>, MissQueue>;
+template class GHBPrefetcher<CacheTags<LRU,NullCompression>, BlockingBuffer>;
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/prefetch/ghb_prefetcher.hh b/src/mem/cache/prefetch/ghb_prefetcher.hh
new file mode 100644 (file)
index 0000000..f25ebe1
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Describes a GHB prefetcher based on template policies.
+ */
+
+#ifndef __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
+#define __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
+
+#include "base/misc.hh" // fatal, panic, and warn
+
+#include "mem/cache/prefetch/prefetcher.hh"
+
+/**
+ * A template-policy based GHB prefetcher. The behavior of the prefetcher
+ * can be altered by supplying different template policies. TagStore
+ * handles all tag and data storage @sa TagStore. Buffering handles all
+ * misses and writes/writebacks @sa MissQueue.
+ */
+template <class TagStore, class Buffering>
+class GHBPrefetcher : public Prefetcher<TagStore, Buffering>
+{
+  protected:
+
+    Buffering* mq;
+    TagStore* tags;
+
+    Addr second_last_miss_addr[64/*MAX_CPUS*/];
+    Addr last_miss_addr[64/*MAX_CPUS*/];
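+    // NOTE: this history is not explicitly initialized, so the strides
+    // computed for a CPU are meaningless until it has recorded two misses.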
+
+    Tick latency;
+    int degree;
+    bool useCPUId;
+
+  public:
+
+    GHBPrefetcher(int size, bool pageStop, bool serialSquash,
+                  bool cacheCheckPush, bool onlyData,
+                  Tick latency, int degree, bool useCPUId)
+        :Prefetcher<TagStore, Buffering>(size, pageStop, serialSquash,
+                                         cacheCheckPush, onlyData),
+         latency(latency), degree(degree), useCPUId(useCPUId)
+    {
+    }
+
+    ~GHBPrefetcher() {}
+
+    void calculatePrefetch(Packet * &pkt, std::list<Addr> &addresses,
+                           std::list<Tick> &delays)
+    {
+        Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
+        int cpuID = pkt->cpu_num;
+        if (!useCPUId) cpuID = 0;
+
+
+        int new_stride = blkAddr - last_miss_addr[cpuID];
+        int old_stride = last_miss_addr[cpuID] -
+                         second_last_miss_addr[cpuID];
+
+        second_last_miss_addr[cpuID] = last_miss_addr[cpuID];
+        last_miss_addr[cpuID] = blkAddr;
+
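+        // e.g. with 0x40-byte blocks, misses to 0x1000, 0x1040, 0x1080
+        // give matching strides of 0x40, so blocks 0x10c0, 0x1100, ...
+        // (up to `degree` ahead) are queued below.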
+        if (new_stride == old_stride) {
+            for (int d=1; d <= degree; d++) {
+                Addr newAddr = blkAddr + d * new_stride;
+                if (this->pageStop &&
+                    (blkAddr & ~(TheISA::VMPageSize - 1)) !=
+                    (newAddr & ~(TheISA::VMPageSize - 1)))
+                {
+                    //Spanned the page, so now stop
+                    this->pfSpanPage += degree - d + 1;
+                    return;
+                }
+                else
+                {
+                    addresses.push_back(newAddr);
+                    delays.push_back(latency);
+                }
+            }
+        }
+    }
+};
+
+#endif // __MEM_CACHE_PREFETCH_GHB_PREFETCHER_HH__
diff --git a/src/mem/cache/prefetch/stride_prefetcher.cc b/src/mem/cache/prefetch/stride_prefetcher.cc
new file mode 100644 (file)
index 0000000..93a0964
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ *          Steve Reinhardt
+ */
+
+/**
+ * @file
+ * Stride Prefetcher template instantiations.
+ */
+
+#include "mem/cache/tags/cache_tags.hh"
+
+#include "mem/cache/tags/lru.hh"
+
+#include "base/compression/null_compression.hh"
+
+#include "mem/cache/miss/miss_queue.hh"
+#include "mem/cache/miss/blocking_buffer.hh"
+
+#include "mem/cache/prefetch/stride_prefetcher.hh"
+
+// Template Instantiations
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+template class StridePrefetcher<CacheTags<LRU,NullCompression>, MissQueue>;
+template class StridePrefetcher<CacheTags<LRU,NullCompression>, BlockingBuffer>;
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/prefetch/stride_prefetcher.hh b/src/mem/cache/prefetch/stride_prefetcher.hh
new file mode 100644 (file)
index 0000000..f897762
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Describes a strided prefetcher based on template policies.
+ */
+
+#ifndef __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
+#define __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
+
+#include "base/misc.hh" // fatal, panic, and warn
+
+#include "mem/cache/prefetch/prefetcher.hh"
+
+/**
+ * A template-policy based stride prefetcher. The behavior of the
+ * prefetcher can be altered by supplying different template policies.
+ * TagStore handles all tag and data storage @sa TagStore. Buffering
+ * handles all misses and writes/writebacks @sa MissQueue.
+ */
+template <class TagStore, class Buffering>
+class StridePrefetcher : public Prefetcher<TagStore, Buffering>
+{
+  protected:
+
+    Buffering* mq;
+    TagStore* tags;
+
+    class strideEntry
+    {
+      public:
+        Addr IAddr;
+        Addr MAddr;
+        int stride;
+        int64_t confidence;
+
+/*     bool operator < (strideEntry a,strideEntry b)
+        {
+            if (a.confidence == b.confidence) {
+                return true; //??????
+            }
+            else return a.confidence < b.confidence;
+            }*/
+    };
+    Addr* lastMissAddr[64/*MAX_CPUS*/];
+
+    std::list<strideEntry*> table[64/*MAX_CPUS*/];
+    Tick latency;
+    int degree;
+    bool useCPUId;
+
+
+  public:
+
+    StridePrefetcher(int size, bool pageStop, bool serialSquash,
+                     bool cacheCheckPush, bool onlyData,
+                     Tick latency, int degree, bool useCPUId)
+        :Prefetcher<TagStore, Buffering>(size, pageStop, serialSquash,
+                                         cacheCheckPush, onlyData),
+         latency(latency), degree(degree), useCPUId(useCPUId)
+    {
+    }
+
+    ~StridePrefetcher() {}
+
+    void calculatePrefetch(Packet * &pkt, std::list<Addr> &addresses,
+                           std::list<Tick> &delays)
+    {
+//     Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
+        int cpuID = pkt->cpu_num;
+        if (!useCPUId) cpuID = 0;
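+        // The table-based stride logic below is commented out in this
+        // early version; as written, calculatePrefetch issues no
+        // prefetches.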
+
+        /* Scan Table for IAddr Match */
+/*     std::list<strideEntry*>::iterator iter;
+        for (iter=table[cpuID].begin();
+             iter !=table[cpuID].end();
+             iter++) {
+            if ((*iter)->IAddr == pkt->pc) break;
+        }
+
+        if (iter != table[cpuID].end()) {
+            //Hit in table
+
+            int newStride = blkAddr - (*iter)->MAddr;
+            if (newStride == (*iter)->stride) {
+                (*iter)->confidence++;
+            }
+            else {
+                (*iter)->stride = newStride;
+                (*iter)->confidence--;
+            }
+
+            (*iter)->MAddr = blkAddr;
+
+            for (int d=1; d <= degree; d++) {
+                Addr newAddr = blkAddr + d * newStride;
+                if (this->pageStop &&
+                    (blkAddr & ~(TheISA::VMPageSize - 1)) !=
+                    (newAddr & ~(TheISA::VMPageSize - 1)))
+                {
+                    //Spanned the page, so now stop
+                    this->pfSpanPage += degree - d + 1;
+                    return;
+                }
+                else
+                {
+                    addresses.push_back(newAddr);
+                    delays.push_back(latency);
+                }
+            }
+        }
+        else {
+            //Miss in table
+            //Find lowest confidence and replace
+
+        }
+*/    }
+};
+
+#endif // __MEM_CACHE_PREFETCH_STRIDE_PREFETCHER_HH__
diff --git a/src/mem/cache/prefetch/tagged_prefetcher.hh b/src/mem/cache/prefetch/tagged_prefetcher.hh
new file mode 100644 (file)
index 0000000..17f500d
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Describes a tagged prefetcher based on template policies.
+ */
+
+#ifndef __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
+#define __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
+
+#include "mem/cache/prefetch/prefetcher.hh"
+
+/**
+ * A template-policy based tagged prefetcher. The behavior of the
+ * prefetcher can be altered by supplying different template policies.
+ * TagStore handles all tag and data storage @sa TagStore. Buffering
+ * handles all misses and writes/writebacks @sa MissQueue.
+ */
+template <class TagStore, class Buffering>
+class TaggedPrefetcher : public Prefetcher<TagStore, Buffering>
+{
+  protected:
+
+    Buffering* mq;
+    TagStore* tags;
+
+    Tick latency;
+    int degree;
+
+  public:
+
+    TaggedPrefetcher(int size, bool pageStop, bool serialSquash,
+                     bool cacheCheckPush, bool onlyData,
+                     Tick latency, int degree);
+
+    ~TaggedPrefetcher() {}
+
+    void calculatePrefetch(Packet * &pkt, std::list<Addr> &addresses,
+                           std::list<Tick> &delays);
+};
+
+#endif // __MEM_CACHE_PREFETCH_TAGGED_PREFETCHER_HH__
index 7bdabbe1420d7ecc3cf8c1984af74c0a19492a6a..9e46ba89374428d08d78281c4d068d424c80862f 100644 (file)
@@ -49,10 +49,10 @@ TaggedPrefetcher(int size, bool pageStop, bool serialSquash,
 template <class TagStore, class Buffering>
 void
 TaggedPrefetcher<TagStore, Buffering>::
-calculatePrefetch(MemReqPtr &req, std::list<Addr> &addresses,
+calculatePrefetch(Packet * &pkt, std::list<Addr> &addresses,
                   std::list<Tick> &delays)
 {
-    Addr blkAddr = req->paddr & ~(Addr)(this->blkSize-1);
+    Addr blkAddr = pkt->paddr & ~(Addr)(this->blkSize-1);
 
     for (int d=1; d <= degree; d++) {
         Addr newAddr = blkAddr + d*(this->blkSize);
diff --git a/src/mem/cache/tags/base_tags.cc b/src/mem/cache/tags/base_tags.cc
new file mode 100644 (file)
index 0000000..1537373
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Definitions of BaseTags.
+ */
+
+#include "mem/cache/tags/base_tags.hh"
+
+#include "mem/cache/base_cache.hh"
+#include "cpu/smt.hh" //maxThreadsPerCPU
+#include "sim/sim_exit.hh"
+
+using namespace std;
+
+void
+BaseTags::setCache(BaseCache *_cache)
+{
+    cache = _cache;
+    objName = cache->name();
+}
+
+void
+BaseTags::regStats(const string &name)
+{
+    using namespace Stats;
+    replacements
+        .init(maxThreadsPerCPU)
+        .name(name + ".replacements")
+        .desc("number of replacements")
+        .flags(total)
+        ;
+
+    tagsInUse
+        .name(name + ".tagsinuse")
+        .desc("Cycle average of tags in use")
+        ;
+
+    totalRefs
+        .name(name + ".total_refs")
+        .desc("Total number of references to valid blocks.")
+        ;
+
+    sampledRefs
+        .name(name + ".sampled_refs")
+        .desc("Sample count of references to valid blocks.")
+        ;
+
+    avgRefs
+        .name(name + ".avg_refs")
+        .desc("Average number of references to valid blocks.")
+        ;
+
+    avgRefs = totalRefs/sampledRefs;
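+    // Stats::Formula: the ratio above is evaluated when statistics are
+    // dumped, not at assignment time.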
+
+    warmupCycle
+        .name(name + ".warmup_cycle")
+        .desc("Cycle when the warmup percentage was hit.")
+        ;
+
+    registerExitCallback(new BaseTagsCallback(this));
+}
diff --git a/src/mem/cache/tags/base_tags.hh b/src/mem/cache/tags/base_tags.hh
new file mode 100644 (file)
index 0000000..b7b0c7e
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Ron Dreslinski
+ */
+
+/**
+ * @file
+ * Declaration of a common base class for cache tagstore objects.
+ */
+
+#ifndef __BASE_TAGS_HH__
+#define __BASE_TAGS_HH__
+
+#include <string>
+#include "base/statistics.hh"
+#include "base/callback.hh"
+
+class BaseCache;
+
+/**
+ * A common base class of Cache tagstore objects.
+ */
+class BaseTags
+{
+  protected:
+    /** Pointer to the parent cache. */
+    BaseCache *cache;
+
+    /** Local copy of the parent cache name. Used for DPRINTF. */
+    std::string objName;
+
+    /**
+     * The number of tags that need to be touched to meet the warmup
+     * percentage.
+     */
+    int warmupBound;
+    /** Marked true when the cache is warmed up. */
+    bool warmedUp;
+
+    // Statistics
+    /**
+     * @addtogroup CacheStatistics
+     * @{
+     */
+
+    /** Number of replacements of valid blocks per thread. */
+    Stats::Vector<> replacements;
+    /** Per cycle average of the number of tags that hold valid data. */
+    Stats::Average<> tagsInUse;
+
+    /** The total number of references to a block before it is replaced. */
+    Stats::Scalar<> totalRefs;
+
+    /**
+     * The number of reference counts sampled. This is different from
+     * replacements because we sample all the valid blocks when the simulator
+     * exits.
+     */
+    Stats::Scalar<> sampledRefs;
+
+    /**
+     * Average number of references to a block before it was replaced.
+     * @todo This should change to an average stat once we have them.
+     */
+    Stats::Formula avgRefs;
+
+    /** The cycle that the warmup percentage was hit. */
+    Stats::Scalar<> warmupCycle;
+    /**
+     * @}
+     */
+
+  public:
+
+    /**
+     * Destructor.
+     */
+    virtual ~BaseTags() {}
+
+    /**
+     * Set the parent cache back pointer. Also copies the cache name to
+     * objName.
+     * @param _cache Pointer to parent cache.
+     */
+    void setCache(BaseCache *_cache);
+
+    /**
+     * Return the parent cache name.
+     * @return the parent cache name.
+     */
+    const std::string &name() const
+    {
+        return objName;
+    }
+
+    /**
+     * Register local statistics.
+     * @param name The name to precede each statistic name.
+     */
+    void regStats(const std::string &name);
+
+    /**
+     * Average in the reference count for valid blocks when the simulation
+     * exits.
+     */
+    virtual void cleanupRefs() {}
+};
+
+class BaseTagsCallback : public Callback
+{
+    BaseTags *tags;
+  public:
+    BaseTagsCallback(BaseTags *t) : tags(t) {}
+    virtual void process() { tags->cleanupRefs(); };
+};
+
+#endif //__BASE_TAGS_HH__
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
new file mode 100644 (file)
index 0000000..66d91b3
--- /dev/null
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Definitions of a fully associative LRU tagstore.
+ */
+
+#include <sstream>
+
+#include <assert.h>
+
+#include "mem/cache/tags/fa_lru.hh"
+#include "base/intmath.hh"
+
+using namespace std;
+
+FALRU::FALRU(int _blkSize, int _size, int hit_latency)
+    : blkSize(_blkSize), size(_size),
+      numBlks(size/blkSize), hitLatency(hit_latency)
+{
+    if (!isPowerOf2(blkSize))
+        fatal("cache block size (in bytes) `%d' must be a power of two",
+              blkSize);
+    if (!(hitLatency > 0))
+        fatal("Access latency in cycles must be at least one cycle");
+    if (!isPowerOf2(size))
+        fatal("Cache Size must be power of 2 for now");
+
+    // Track all cache sizes from 128K up by powers of 2
+    numCaches = floorLog2(size) - 17;
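+    // e.g. a 1MB cache gives numCaches == 20 - 17 == 3, tracking the
+    // 128K, 256K, and 512K boundaries in addition to the full size.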
+    if (numCaches >0){
+        cacheBoundaries = new FALRUBlk *[numCaches];
+        cacheMask = (1 << numCaches) - 1;
+    } else {
+        cacheMask = 0;
+    }
+
+    warmedUp = false;
+    warmupBound = size/blkSize;
+
+    blks = new FALRUBlk[numBlks];
+    head = &(blks[0]);
+    tail = &(blks[numBlks-1]);
+
+    head->prev = NULL;
+    head->next = &(blks[1]);
+    head->inCache = cacheMask;
+
+    tail->prev = &(blks[numBlks-2]);
+    tail->next = NULL;
+    tail->inCache = 0;
+
+    int index = (1 << 17) / blkSize;
+    int j = 0;
+    int flags = cacheMask;
+    for (int i = 1; i < numBlks-1; i++) {
+        blks[i].inCache = flags;
+        if (i == index - 1){
+            cacheBoundaries[j] = &(blks[i]);
+            flags &= ~ (1<<j);
+            ++j;
+            index = index << 1;
+        }
+        blks[i].prev = &(blks[i-1]);
+        blks[i].next = &(blks[i+1]);
+        blks[i].isTouched = false;
+    }
+    assert(j == numCaches);
+    assert(index == numBlks);
+    //assert(check());
+}
+
+void
+FALRU::regStats(const string &name)
+{
+    using namespace Stats;
+    BaseTags::regStats(name);
+    hits
+        .init(numCaches+1)
+        .name(name + ".falru_hits")
+        .desc("The number of hits in each cache size.")
+        ;
+    misses
+        .init(numCaches+1)
+        .name(name + ".falru_misses")
+        .desc("The number of misses in each cache size.")
+        ;
+    accesses
+        .name(name + ".falru_accesses")
+        .desc("The number of accesses to the FA LRU cache.")
+        ;
+
+    for (int i = 0; i < numCaches+1; ++i) {
+        stringstream size_str;
+        if (i < 3){
+            size_str << (1<<(i+7)) <<"K";
+        } else {
+            size_str << (1<<(i-3)) <<"M";
+        }
+
+        hits.subname(i, size_str.str());
+        hits.subdesc(i, "Hits in a " + size_str.str() +" cache");
+        misses.subname(i, size_str.str());
+        misses.subdesc(i, "Misses in a " + size_str.str() +" cache");
+    }
+}
+
+FALRUBlk *
+FALRU::hashLookup(Addr addr) const
+{
+    tagIterator iter = tagHash.find(addr);
+    if (iter != tagHash.end()) {
+        return (*iter).second;
+    }
+    return NULL;
+}
+
+bool
+FALRU::probe(int asid, Addr addr) const
+{
+    Addr blkAddr = blkAlign(addr);
+    FALRUBlk* blk = hashLookup(blkAddr);
+    return blk && blk->tag == blkAddr && blk->isValid();
+}
+
+void
+FALRU::invalidateBlk(int asid, Addr addr)
+{
+    Addr blkAddr = blkAlign(addr);
+    FALRUBlk* blk = hashLookup(blkAddr); // avoids dereferencing tagHash.end() on a miss
+    if (blk) {
+        assert(blk->tag == blkAddr);
+        blk->status = 0;
+        blk->isTouched = false;
+        tagsInUse--;
+    }
+}
+
+FALRUBlk*
+FALRU::findBlock(Addr addr, int asid, int &lat, int *inCache)
+{
+    accesses++;
+    int tmp_in_cache = 0;
+    Addr blkAddr = blkAlign(addr);
+    FALRUBlk* blk = hashLookup(blkAddr);
+
+    if (blk && blk->isValid()) {
+        assert(blk->tag == blkAddr);
+        tmp_in_cache = blk->inCache;
+        for (int i = 0; i < numCaches; i++) {
+            if (1<<i & blk->inCache) {
+                hits[i]++;
+            } else {
+                misses[i]++;
+            }
+        }
+        hits[numCaches]++;
+        if (blk != head){
+            moveToHead(blk);
+        }
+    } else {
+        blk = NULL;
+        for (int i = 0; i < numCaches+1; ++i) {
+            misses[i]++;
+        }
+    }
+    if (inCache) {
+        *inCache = tmp_in_cache;
+    }
+
+    lat = hitLatency;
+    //assert(check());
+    return blk;
+}
+
+FALRUBlk*
+FALRU::findBlock(Packet * &pkt, int &lat, int *inCache)
+{
+    // Same lookup as the address-based variant; the FA store ignores the
+    // address space ID, so delegate with a dummy asid.
+    return findBlock(pkt->paddr, 0, lat, inCache);
+}
+
+FALRUBlk*
+FALRU::findBlock(Addr addr, int asid) const
+{
+    Addr blkAddr = blkAlign(addr);
+    FALRUBlk* blk = hashLookup(blkAddr);
+
+    if (blk && blk->isValid()) {
+        assert(blk->tag == blkAddr);
+    } else {
+        blk = NULL;
+    }
+    return blk;
+}
+
+FALRUBlk*
+FALRU::findReplacement(Packet * &pkt, PacketList* &writebacks,
+                       BlkList &compress_blocks)
+{
+    FALRUBlk * blk = tail;
+    assert(blk->inCache == 0);
+    moveToHead(blk);
+    tagHash.erase(blk->tag);
+    tagHash[blkAlign(pkt->paddr)] = blk;
+    if (blk->isValid()) {
+        int thread_num = (blk->xc) ? blk->xc->getThreadNum() : 0;
+        replacements[thread_num]++;
+    } else {
+        tagsInUse++;
+        blk->isTouched = true;
+        if (!warmedUp && tagsInUse.value() >= warmupBound) {
+            warmedUp = true;
+            warmupCycle = curTick;
+        }
+    }
+    //assert(check());
+    return blk;
+}
+
+void
+FALRU::moveToHead(FALRUBlk *blk)
+{
+    int updateMask = blk->inCache ^ cacheMask;
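+    // updateMask has a bit set for each tracked size the block was not
+    // already resident in; each such boundary slides back one block as
+    // this block becomes the MRU of every tracked cache size.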
+    for (int i = 0; i < numCaches; i++){
+        if ((1<<i) & updateMask) {
+            cacheBoundaries[i]->inCache &= ~(1<<i);
+            cacheBoundaries[i] = cacheBoundaries[i]->prev;
+        } else if (cacheBoundaries[i] == blk) {
+            cacheBoundaries[i] = blk->prev;
+        }
+    }
+    blk->inCache = cacheMask;
+    if (blk != head) {
+        if (blk == tail){
+            assert(blk->next == NULL);
+            tail = blk->prev;
+            tail->next = NULL;
+        } else {
+            blk->prev->next = blk->next;
+            blk->next->prev = blk->prev;
+        }
+        blk->next = head;
+        blk->prev = NULL;
+        head->prev = blk;
+        head = blk;
+    }
+}
+
+bool
+FALRU::check()
+{
+    FALRUBlk* blk = head;
+    int size = 0;
+    int boundary = 1<<17;
+    int j = 0;
+    int flags = cacheMask;
+    while (blk) {
+        size += blkSize;
+        if (blk->inCache != flags) {
+            return false;
+        }
+        if (size == boundary && blk != tail) {
+            if (cacheBoundaries[j] != blk) {
+                return false;
+            }
+            flags &=~(1 << j);
+            boundary = boundary<<1;
+            ++j;
+        }
+        blk = blk->next;
+    }
+    return true;
+}
diff --git a/src/mem/cache/tags/fa_lru.hh b/src/mem/cache/tags/fa_lru.hh
new file mode 100644 (file)
index 0000000..7855f84
--- /dev/null
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declaration of a fully associative LRU tag store.
+ */
+
+#ifndef __FA_LRU_HH__
+#define __FA_LRU_HH__
+
+#include <list>
+
+#include "mem/cache/cache_blk.hh"
+#include "mem/packet.hh"
+#include "base/hashmap.hh"
+#include "mem/cache/tags/base_tags.hh"
+
+/**
+ * A fully associative cache block.
+ */
+class FALRUBlk : public CacheBlk
+{
+public:
+    /** The previous block in LRU order. */
+    FALRUBlk *prev;
+    /** The next block in LRU order. */
+    FALRUBlk *next;
+    /** Has this block been touched? */
+    bool isTouched;
+
+    /**
+     * A bit mask of the sizes of cache that this block is resident in.
+     * Each bit represents one of the tracked cache sizes, starting at
+     * 128KB and doubling: if bit 0 is set, this block is in a 128KB
+     * cache; if bit 1 is set, it is in a 256KB cache, and so on. There
+     * is one bit for each tracked cache smaller than the full size
+     * (default 16MB).
+     */
+    int inCache;
+};
+
+/**
+ * A fully associative LRU cache. Keeps statistics for accesses to a number of
+ * cache sizes at once.
+ */
+class FALRU : public BaseTags
+{
+  public:
+    /** Typedef the block type used in this class. */
+    typedef FALRUBlk BlkType;
+    /** Typedef a list of pointers to the local block type. */
+    typedef std::list<FALRUBlk*> BlkList;
+  protected:
+    /** The block size of the cache. */
+    const int blkSize;
+    /** The size of the cache. */
+    const int size;
+    /** The number of blocks in the cache. */
+    const int numBlks; // calculated internally
+    /** The hit latency of the cache. */
+    const int hitLatency;
+
+    /** Array of pointers to blocks at the cache size boundaries. */
+    FALRUBlk **cacheBoundaries;
+    /** A mask for the FALRUBlk::inCache bits. */
+    int cacheMask;
+    /** The number of different size caches being tracked. */
+    int numCaches;
+
+    /** The cache blocks. */
+    FALRUBlk *blks;
+
+    /** The MRU block. */
+    FALRUBlk *head;
+    /** The LRU block. */
+    FALRUBlk *tail;
+
+    /** Hash table type mapping addresses to cache block pointers. */
+    typedef m5::hash_map<Addr, FALRUBlk *, m5::hash<Addr> > hash_t;
+    /** Iterator into the address hash table. */
+    typedef hash_t::const_iterator tagIterator;
+
+    /** The address hash table. */
+    hash_t tagHash;
+
+    /**
+     * Find the cache block for the given address.
+     * @param addr The address to find.
+     * @return The cache block of the address, if any.
+     */
+    FALRUBlk * hashLookup(Addr addr) const;
+
+    /**
+     * Move a cache block to the MRU position.
+     * @param blk The block to promote.
+     */
+    void moveToHead(FALRUBlk *blk);
+
+    /**
+     * Check to make sure all the cache boundaries are still where they should
+     * be. Used for debugging.
+     * @return True if everything is correct.
+     */
+    bool check();
+
+    /**
+     * @defgroup FALRUStats Fully Associative LRU specific statistics
+     * The FA lru stack lets us track multiple cache sizes at once. These
+     * statistics track the hits and misses for different cache sizes.
+     * @{
+     */
+
+    /** Hits in each cache size >= 128K. */
+    Stats::Vector<> hits;
+    /** Misses in each cache size >= 128K. */
+    Stats::Vector<> misses;
+    /** Total number of accesses. */
+    Stats::Scalar<> accesses;
+
+    /**
+     * @}
+     */
+
+public:
+    /**
+     * Construct and initialize this cache tagstore.
+     * @param blkSize The block size of the cache.
+     * @param size The size of the cache.
+     * @param hit_latency The hit latency of the cache.
+     */
+    FALRU(int blkSize, int size, int hit_latency);
+
+    /**
+     * Register the stats for this object.
+     * @param name The name to prepend to the stats name.
+     */
+    void regStats(const std::string &name);
+
+    /**
+     * Return true if the address is found in the cache.
+     * @param asid The address space ID.
+     * @param addr The address to look for.
+     * @return True if the address is in the cache.
+     */
+    bool probe(int asid, Addr addr) const;
+
+    /**
+     * Invalidate the cache block that contains the given addr.
+     * @param asid The address space ID.
+     * @param addr The address to invalidate.
+     */
+    void invalidateBlk(int asid, Addr addr);
+
+    /**
+     * Find the block in the cache and update the replacement data. Returns
+     * the access latency and the inCache flags as side effects.
+     * @param addr The address to look for.
+     * @param asid The address space ID.
+     * @param lat The latency of the access.
+     * @param inCache The FALRUBlk::inCache flags.
+     * @return Pointer to the cache block.
+     */
+    FALRUBlk* findBlock(Addr addr, int asid, int &lat, int *inCache = 0);
+
+    /**
+     * Find the block in the cache and update the replacement data. Returns
+     * the access latency and the inCache flags as side effects.
+     * @param pkt The packet whose block to find.
+     * @param lat The latency of the access.
+     * @param inCache The FALRUBlk::inCache flags.
+     * @return Pointer to the cache block.
+     */
+    FALRUBlk* findBlock(Packet * &pkt, int &lat, int *inCache = 0);
+
+    /**
+     * Find the block in the cache, do not update the replacement data.
+     * @param addr The address to look for.
+     * @param asid The address space ID.
+     * @return Pointer to the cache block.
+     */
+    FALRUBlk* findBlock(Addr addr, int asid) const;
+
+    /**
+     * Find a replacement block for the address provided.
+     * @param pkt The packet to find a replacement candidate for.
+     * @param writebacks List for any writebacks to be performed.
+     * @param compress_blocks List of blocks to compress, for adaptive comp.
+     * @return The block to place the replacement in.
+     */
+    FALRUBlk* findReplacement(Packet * &pkt, PacketList* & writebacks,
+                              BlkList &compress_blocks);
+
+    /**
+     * Return the hit latency of this cache.
+     * @return The hit latency.
+     */
+    int getHitLatency() const
+    {
+        return hitLatency;
+    }
+
+    /**
+     * Return the block size of this cache.
+     * @return The block size.
+     */
+    int getBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Return the subblock size of this cache, always the block size.
+     * @return The block size.
+     */
+    int getSubBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Align an address to the block size.
+     * @param addr the address to align.
+     * @return The aligned address.
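+     * E.g., with a 64-byte block size, blkAlign(0x12345) == 0x12340.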
+     */
+    Addr blkAlign(Addr addr) const
+    {
+        return (addr & ~(Addr)(blkSize-1));
+    }
+
+    /**
+     * Generate the tag from the address. For fully associative this is just the
+     * block address.
+     * @param addr The address to get the tag from.
+     * @param blk ignored here
+     * @return The tag.
+     */
+    Addr extractTag(Addr addr, FALRUBlk *blk) const
+    {
+        return blkAlign(addr);
+    }
+
+    /**
+     * Return the set of an address. Only one set in a fully associative cache.
+     * @param addr The address to get the set from.
+     * @return 0.
+     */
+    int extractSet(Addr addr) const
+    {
+        return 0;
+    }
+
+    /**
+     * Calculate the block offset of an address.
+     * @param addr the address to get the offset of.
+     * @return the block offset.
+     */
+    int extractBlkOffset(Addr addr) const
+    {
+        return (addr & (Addr)(blkSize-1));
+    }
+
+    /**
+     * Regenerate the block address from the tag and the set.
+     * @param tag The tag of the block.
+     * @param set The set the block belongs to.
+     * @return the block address.
+     */
+    Addr regenerateBlkAddr(Addr tag, int set) const
+    {
+        return (tag);
+    }
+
+    /**
+     * Read the data out of the internal storage of a cache block. FALRU
+     * currently doesn't support data storage.
+     * @param blk The cache block to read.
+     * @param data The buffer to read the data into.
+     * @return The data from the cache block.
+     */
+    void readData(FALRUBlk *blk, uint8_t *data)
+    {
+    }
+
+    /**
+     * Write data into the internal storage of a cache block. FALRU
+     * currently doesn't support data storage.
+     * @param blk The cache block to be written.
+     * @param data The data to write.
+     * @param size The number of bytes to write.
+     * @param writebacks A list for any writebacks to be performed. May be
+     * needed when writing to a compressed block.
+     */
+    void writeData(FALRUBlk *blk, uint8_t *data, int size,
+                   PacketList* &writebacks)
+    {
+    }
+
+    /**
+     * Unimplemented. Perform a cache block copy from block aligned addresses.
+     * @param source The block aligned source address.
+     * @param dest The block aligned destination address.
+     * @param asid The address space ID.
+     * @param writebacks List for any generated writeback requests.
+     */
+    void doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks)
+    {
+    }
+
+    /**
+     * Unimplemented.
+     */
+    void fixCopy(Packet * &pkt, PacketList* &writebacks)
+    {
+    }
+
+};
+
+#endif
diff --git a/src/mem/cache/tags/iic.cc b/src/mem/cache/tags/iic.cc
new file mode 100644 (file)
index 0000000..a574ada
--- /dev/null
@@ -0,0 +1,869 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Definitions of the Indirect Index Cache tagstore.
+ */
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include <math.h>
+
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/tags/iic.hh"
+#include "base/intmath.hh"
+#include "sim/root.hh" // for curTick
+
+#include "base/trace.hh" // for DPRINTF
+
+
+using namespace std;
+
+/** Track the number of accesses to each cache set. */
+#define PROFILE_IIC 1
+
+IIC::IIC(IIC::Params &params) :
+    hashSets(params.numSets), blkSize(params.blkSize), assoc(params.assoc),
+    hitLatency(params.hitLatency), subSize(params.subblockSize),
+    numSub(blkSize/subSize),
+    trivialSize((floorLog2(params.size/subSize)*numSub)/8),
+    tagShift(floorLog2(blkSize)), blkMask(blkSize - 1),
+    subShift(floorLog2(subSize)), subMask(numSub - 1),
+    hashDelay(params.hashDelay),
+    numBlocks(params.size/subSize),
+    numTags(hashSets * assoc + params.size/blkSize -1),
+    numSecondary(params.size/blkSize),
+    tagNull(numTags),
+    primaryBound(hashSets * assoc)
+{
+    int i;
+
+    // Check parameters
+    if (blkSize < 4 || !isPowerOf2(blkSize)) {
+        fatal("Block size must be at least 4 and a power of 2");
+    }
+    if (hashSets <= 0 || !isPowerOf2(hashSets)) {
+        fatal("# of hashsets must be non-zero and a power of 2");
+    }
+    if (assoc <= 0) {
+        fatal("associativity must be greater than zero");
+    }
+    if (hitLatency <= 0) {
+        fatal("access latency must be greater than zero");
+    }
+    if (numSub*subSize != blkSize) {
+        fatal("blocksize must be evenly divisible by subblock size");
+    }
+
+    // debug stuff
+    freeSecond = numSecondary;
+
+    warmedUp = false;
+    warmupBound = params.size/blkSize;
+
+    // Replacement Policy Initialization
+    repl = params.rp;
+    repl->setIIC(this);
+
+    //last_miss_time = 0
+
+    // allocate data reference counters
+    dataReferenceCount = new int[numBlocks];
+    memset(dataReferenceCount, 0, numBlocks*sizeof(int));
+
+    // Allocate storage for both internal data and block fast access data.
+    // We allocate it as one large chunk to reduce overhead and to make
+    // deletion easier.
+    int data_index = 0;
+    dataStore = new uint8_t[(numBlocks + numTags) * blkSize];
+    dataBlks = new uint8_t*[numBlocks];
+    for (i = 0; i < numBlocks; ++i) {
+        dataBlks[i] = &dataStore[data_index];
+        freeDataBlock(i);
+        data_index += subSize;
+    }
+
+    assert(data_index == numBlocks * subSize);
+
+    // allocate and init tag store
+    tagStore = new IICTag[numTags];
+
+    int blkIndex = 0;
+    // allocate and init sets
+    sets = new IICSet[hashSets];
+    for (i = 0; i < hashSets; ++i) {
+        sets[i].assoc = assoc;
+        sets[i].tags = new IICTag*[assoc];
+        sets[i].chain_ptr = tagNull;
+
+        for (int j = 0; j < assoc; ++j) {
+            IICTag *tag = &tagStore[blkIndex++];
+            tag->chain_ptr = tagNull;
+            tag->data_ptr.resize(numSub);
+            tag->size = blkSize;
+            tag->trivialData = new uint8_t[trivialSize];
+            tag->numData = 0;
+            sets[i].tags[j] = tag;
+            tag->set = i;
+            tag->data = &dataStore[data_index];
+            data_index += blkSize;
+        }
+    }
+
+    assert(blkIndex == primaryBound);
+
+    for (i = primaryBound; i < tagNull; i++) {
+        tagStore[i].chain_ptr = i+1;
+        //setup data ptrs to subblocks
+        tagStore[i].data_ptr.resize(numSub);
+        tagStore[i].size = blkSize;
+        tagStore[i].trivialData = new uint8_t[trivialSize];
+        tagStore[i].numData = 0;
+        tagStore[i].set = 0;
+        tagStore[i].data = &dataStore[data_index];
+        data_index += blkSize;
+    }
+    freelist = primaryBound;
+}
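+
+/*
+ * Illustrative sketch (not part of the original source): the derived
+ * constants above, assuming a 64KB cache with blkSize = 64,
+ * subblockSize = 16, 256 hash sets, and assoc = 4:
+ *
+ *     numSub       = 64/16 = 4 subblocks per block
+ *     trivialSize  = (floorLog2(65536/16) * 4)/8 = (12 * 4)/8 = 6 bytes
+ *     numBlocks    = 65536/16 = 4096 data subblocks
+ *     numTags      = 256*4 + 65536/64 - 1 = 2047 tags
+ *     numSecondary = 65536/64 = 1024 secondary tags
+ *     primaryBound = 256*4 = 1024 (index of the first secondary tag)
+ */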
+
+IIC::~IIC()
+{
+    delete [] dataReferenceCount;
+    delete [] dataStore;
+    delete [] tagStore;
+    delete [] sets;
+}
+
+/* register cache stats */
+void
+IIC::regStats(const string &name)
+{
+    using namespace Stats;
+
+    BaseTags::regStats(name);
+
+    hitHashDepth.init(0, 20, 1);
+    missHashDepth.init(0, 20, 1);
+    setAccess.init(0, hashSets, 1);
+
+    /** IIC Statistics */
+    hitHashDepth
+        .name(name + ".hit_hash_depth_dist")
+        .desc("Dist. of Hash lookup depths")
+        .flags(pdf)
+        ;
+
+    missHashDepth
+        .name(name + ".miss_hash_depth_dist")
+        .desc("Dist. of Hash lookup depths")
+        .flags(pdf)
+        ;
+
+    repl->regStats(name);
+
+    if (PROFILE_IIC)
+        setAccess
+            .name(name + ".set_access_dist")
+            .desc("Dist. of Accesses across sets")
+            .flags(pdf)
+            ;
+
+    missDepthTotal
+        .name(name + ".miss_depth_total")
+        .desc("Total of miss depths")
+        ;
+
+    hashMiss
+        .name(name + ".hash_miss")
+        .desc("Total of misses in hash table")
+        ;
+
+    hitDepthTotal
+        .name(name + ".hit_depth_total")
+        .desc("Total of hit depths")
+        ;
+
+    hashHit
+        .name(name + ".hash_hit")
+        .desc("Total of hites in hash table")
+        ;
+}
+
+// probe cache for presence of given block.
+bool
+IIC::probe(int asid, Addr addr) const
+{
+    return (findBlock(addr,asid) != NULL);
+}
+
+IICTag*
+IIC::findBlock(Addr addr, int asid, int &lat)
+{
+    Addr tag = extractTag(addr);
+    unsigned set = hash(addr);
+    int set_lat;
+
+    unsigned long chain_ptr;
+
+    if (PROFILE_IIC)
+        setAccess.sample(set);
+
+    IICTag *tag_ptr = sets[set].findTag(asid, tag, chain_ptr);
+    set_lat = 1;
+    if (tag_ptr == NULL && chain_ptr != tagNull) {
+        int secondary_depth;
+        tag_ptr = secondaryChain(asid, tag, chain_ptr, &secondary_depth);
+        set_lat += secondary_depth;
+        // set depth for statistics fix this later!!! egh
+        sets[set].depth = set_lat;
+
+        if (tag_ptr != NULL) {
+            /* need to move tag into primary table */
+            // need to preserve chain: fix this egh
+            sets[set].tags[assoc-1]->chain_ptr = tag_ptr->chain_ptr;
+            tagSwap(tag_ptr - tagStore, sets[set].tags[assoc-1] - tagStore);
+            tag_ptr = sets[set].findTag(asid, tag, chain_ptr);
+            assert(tag_ptr!=NULL);
+        }
+
+    }
+    set_lat = set_lat * hashDelay + hitLatency;
+    if (tag_ptr != NULL) {
+        // IIC replacement: if this is not the first element of
+        //   list, reorder
+        sets[set].moveToHead(tag_ptr);
+
+        hitHashDepth.sample(sets[set].depth);
+        hashHit++;
+        hitDepthTotal += sets[set].depth;
+        tag_ptr->status |= BlkReferenced;
+        lat = set_lat;
+        if (tag_ptr->whenReady > curTick && tag_ptr->whenReady - curTick > set_lat) {
+            lat = tag_ptr->whenReady - curTick;
+        }
+
+        tag_ptr->refCount += 1;
+    }
+    else {
+        // fall through: cache block not found, not a hit...
+        missHashDepth.sample(sets[set].depth);
+        hashMiss++;
+        missDepthTotal += sets[set].depth;
+        lat = set_lat;
+    }
+    return tag_ptr;
+}
+
+IICTag*
+IIC::findBlock(Packet * &pkt, int &lat)
+{
+    Addr addr = pkt->paddr;
+    int asid = pkt->req->asid;
+
+    Addr tag = extractTag(addr);
+    unsigned set = hash(addr);
+    int set_lat;
+
+    unsigned long chain_ptr;
+
+    if (PROFILE_IIC)
+        setAccess.sample(set);
+
+    IICTag *tag_ptr = sets[set].findTag(asid, tag, chain_ptr);
+    set_lat = 1;
+    if (tag_ptr == NULL && chain_ptr != tagNull) {
+        int secondary_depth;
+        tag_ptr = secondaryChain(asid, tag, chain_ptr, &secondary_depth);
+        set_lat += secondary_depth;
+        // set depth for statistics fix this later!!! egh
+        sets[set].depth = set_lat;
+
+        if (tag_ptr != NULL) {
+            /* need to move tag into primary table */
+            // need to preserve chain: fix this egh
+            sets[set].tags[assoc-1]->chain_ptr = tag_ptr->chain_ptr;
+            tagSwap(tag_ptr - tagStore, sets[set].tags[assoc-1] - tagStore);
+            tag_ptr = sets[set].findTag(asid, tag, chain_ptr);
+            assert(tag_ptr!=NULL);
+        }
+
+    }
+    set_lat = set_lat * hashDelay + hitLatency;
+    if (tag_ptr != NULL) {
+        // IIC replacement: if this is not the first element of
+        //   list, reorder
+        sets[set].moveToHead(tag_ptr);
+
+        hitHashDepth.sample(sets[set].depth);
+        hashHit++;
+        hitDepthTotal += sets[set].depth;
+        tag_ptr->status |= BlkReferenced;
+        lat = set_lat;
+        if (tag_ptr->whenReady > curTick && tag_ptr->whenReady - curTick > set_lat) {
+            lat = tag_ptr->whenReady - curTick;
+        }
+
+        tag_ptr->refCount += 1;
+    }
+    else {
+        // fall through: cache block not found, not a hit...
+        missHashDepth.sample(sets[set].depth);
+        hashMiss++;
+        missDepthTotal += sets[set].depth;
+        lat = set_lat;
+    }
+    return tag_ptr;
+}
+
+IICTag*
+IIC::findBlock(Addr addr, int asid) const
+{
+    Addr tag = extractTag(addr);
+    unsigned set = hash(addr);
+
+    unsigned long chain_ptr;
+
+    IICTag *tag_ptr = sets[set].findTag(asid, tag, chain_ptr);
+    if (tag_ptr == NULL && chain_ptr != tagNull) {
+        int secondary_depth;
+        tag_ptr = secondaryChain(asid, tag, chain_ptr, &secondary_depth);
+    }
+    return tag_ptr;
+}
+
+
+IICTag*
+IIC::findReplacement(Packet * &pkt, PacketList* &writebacks,
+                     BlkList &compress_blocks)
+{
+    DPRINTF(IIC, "Finding Replacement for %x\n", pkt->paddr);
+    unsigned set = hash(pkt->paddr);
+    IICTag *tag_ptr;
+    unsigned long *tmp_data = new unsigned long[numSub];
+
+    // Get enough subblocks for a full cache line
+    for (int i = 0; i < numSub; ++i){
+        tmp_data[i] = getFreeDataBlock(writebacks);
+        assert(dataReferenceCount[tmp_data[i]]==0);
+    }
+
+    tag_ptr = getFreeTag(set, writebacks);
+
+    tag_ptr->set = set;
+    for (int i=0; i< numSub; ++i) {
+        tag_ptr->data_ptr[i] = tmp_data[i];
+        dataReferenceCount[tag_ptr->data_ptr[i]]++;
+    }
+    tag_ptr->numData = numSub;
+    assert(tag_ptr - tagStore < primaryBound); // make sure it is in primary
+    tag_ptr->chain_ptr = tagNull;
+    sets[set].moveToHead(tag_ptr);
+    delete [] tmp_data;
+
+    list<unsigned long> tag_indexes;
+    repl->doAdvance(tag_indexes);
+    while (!tag_indexes.empty()) {
+        if (!tagStore[tag_indexes.front()].isCompressed()) {
+            compress_blocks.push_back(&tagStore[tag_indexes.front()]);
+        }
+        tag_indexes.pop_front();
+    }
+
+    tag_ptr->re = (void*)repl->add(tag_ptr-tagStore);
+
+    return tag_ptr;
+}
+
+void
+IIC::freeReplacementBlock(PacketList* & writebacks)
+{
+    IICTag *tag_ptr;
+    unsigned long data_ptr;
+    /* consult replacement policy */
+    tag_ptr = &tagStore[repl->getRepl()];
+    assert(tag_ptr->isValid());
+
+    DPRINTF(Cache, "Replacing %x in IIC: %s\n",
+            regenerateBlkAddr(tag_ptr->tag,0),
+            tag_ptr->isModified() ? "writeback" : "clean");
+    /* write back replaced block data */
+    if (tag_ptr && (tag_ptr->isValid())) {
+        int thread_num = (tag_ptr->xc) ? tag_ptr->xc->getThreadNum() : 0;
+        replacements[thread_num]++;
+        totalRefs += tag_ptr->refCount;
+        ++sampledRefs;
+        tag_ptr->refCount = 0;
+
+        if (tag_ptr->isModified()) {
+            Packet * writeback =
+                buildWritebackReq(regenerateBlkAddr(tag_ptr->tag, 0),
+                                  tag_ptr->req->asid, tag_ptr->xc, blkSize,
+                                  (cache->doData())?tag_ptr->data:0,
+                                  tag_ptr->size);
+            writebacks.push_back(writeback);
+        }
+    }
+
+    // free the data blocks
+    for (int i = 0; i < tag_ptr->numData; ++i) {
+        data_ptr = tag_ptr->data_ptr[i];
+        assert(dataReferenceCount[data_ptr]>0);
+        if (--dataReferenceCount[data_ptr] == 0) {
+            freeDataBlock(data_ptr);
+        }
+    }
+    freeTag(tag_ptr);
+}
+
+unsigned long
+IIC::getFreeDataBlock(PacketList* & writebacks)
+{
+    struct IICTag *tag_ptr;
+    unsigned long data_ptr;
+
+    tag_ptr = NULL;
+    /* find data block */
+    while (blkFreelist.empty()) {
+        freeReplacementBlock(writebacks);
+    }
+
+    data_ptr = blkFreelist.front();
+    blkFreelist.pop_front();
+    DPRINTF(IICMore,"Found free data at %d\n",data_ptr);
+    return data_ptr;
+}
+
+
+
+IICTag*
+IIC::getFreeTag(int set, PacketList* & writebacks)
+{
+    unsigned long tag_index;
+    IICTag *tag_ptr;
+    // Add new tag
+    tag_ptr = sets[set].findFree();
+    // if no free in primary, and secondary exists
+    if (!tag_ptr && numSecondary) {
+        // need to spill a tag into secondary storage
+        while (freelist == tagNull) {
+            // get replacements until one is in secondary
+            freeReplacementBlock(writebacks);
+        }
+
+        tag_index = freelist;
+        freelist = tagStore[freelist].chain_ptr;
+        freeSecond--;
+
+        assert(tag_index != tagNull);
+        tagSwap(tag_index, sets[set].tags[assoc-1] - tagStore);
+        tagStore[tag_index].chain_ptr = sets[set].chain_ptr;
+        sets[set].chain_ptr = tag_index;
+
+        tag_ptr = sets[set].tags[assoc-1];
+    }
+    DPRINTF(IICMore,"Found free tag at %d\n",tag_ptr - tagStore);
+    tagsInUse++;
+    if (!warmedUp && tagsInUse.value() >= warmupBound) {
+        warmedUp = true;
+        warmupCycle = curTick;
+    }
+
+    return tag_ptr;
+}
+
+void
+IIC::freeTag(IICTag *tag_ptr)
+{
+    unsigned long tag_index, tmp_index;
+    // Fix tag_ptr
+    if (tag_ptr) {
+        // we have a tag to clear
+        DPRINTF(IICMore,"Freeing Tag for %x\n",
+                regenerateBlkAddr(tag_ptr->tag,0));
+        tagsInUse--;
+        tag_ptr->status = 0;
+        tag_ptr->numData = 0;
+        tag_ptr->re = NULL;
+        tag_index = tag_ptr - tagStore;
+        if (tag_index >= primaryBound) {
+            // tag_ptr points to secondary store
+            assert(tag_index < tagNull); // remove this?? egh
+            if (tag_ptr->chain_ptr == tagNull) {
+                // need to fix chain list
+                unsigned tmp_set = hash(tag_ptr->tag << tagShift);
+                if (sets[tmp_set].chain_ptr == tag_index) {
+                    sets[tmp_set].chain_ptr = tagNull;
+                } else {
+                    tmp_index = sets[tmp_set].chain_ptr;
+                    while (tmp_index != tagNull
+                           && tagStore[tmp_index].chain_ptr != tag_index) {
+                        tmp_index = tagStore[tmp_index].chain_ptr;
+                    }
+                    assert(tmp_index != tagNull);
+                    tagStore[tmp_index].chain_ptr = tagNull;
+                }
+                tag_ptr->chain_ptr = freelist;
+                freelist = tag_index;
+                freeSecond++;
+            } else {
+                // copy next chained entry to this tag location
+                tmp_index = tag_ptr->chain_ptr;
+                tagSwap(tmp_index, tag_index);
+                tagStore[tmp_index].chain_ptr = freelist;
+                freelist = tmp_index;
+                freeSecond++;
+            }
+        } else {
+            // tag_ptr in primary hash table
+            assert(tag_index < primaryBound);
+            tag_ptr->status = 0;
+            unsigned tmp_set = hash(tag_ptr->tag << tagShift);
+            if (sets[tmp_set].chain_ptr != tagNull) { // collapse chain
+                tmp_index = sets[tmp_set].chain_ptr;
+                tagSwap(tag_index, tmp_index);
+                tagStore[tmp_index].chain_ptr = freelist;
+                freelist = tmp_index;
+                freeSecond++;
+                sets[tmp_set].chain_ptr = tag_ptr->chain_ptr;
+                sets[tmp_set].moveToTail(tag_ptr);
+            }
+        }
+    }
+}
+
+void
+IIC::freeDataBlock(unsigned long data_ptr)
+{
+    assert(dataReferenceCount[data_ptr] == 0);
+    DPRINTF(IICMore, "Freeing data at %d\n", data_ptr);
+    blkFreelist.push_front(data_ptr);
+}
+
+/** Use a simple modulo hash. */
+#define SIMPLE_HASH 0
+
+unsigned
+IIC::hash(Addr addr) const
+{
+#if SIMPLE_HASH
+    return extractTag(addr) % hashSets;
+#else
+    Addr tag, mask, x, y;
+    tag = extractTag(addr);
+    mask = hashSets - 1; /* assumes hashSets is a power of 2 */
+    x = tag & mask;
+    y = (tag >> floorLog2(hashSets)) & mask;
+    assert(x < hashSets && y < hashSets);
+    return x ^ y;
+#endif
+}
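+
+/*
+ * Illustrative sketch (not part of the original source): the XOR-folding
+ * hash above, assuming hashSets = 256 (mask = 0xff) and a tag of 0x12345:
+ *
+ *     x = 0x12345 & 0xff        = 0x45
+ *     y = (0x12345 >> 8) & 0xff = 0x23
+ *     x ^ y                     = 0x66
+ */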
+
+
+void
+IICSet::moveToHead(IICTag *tag)
+{
+    if (tags[0] == tag)
+        return;
+
+    // write 'next' tag into tags[i], moving up from MRU toward LRU
+    // until we overwrite the tag we moved to head.
+
+    // start by setting up to write 'tag' into tags[0]
+    int i = 0;
+    IICTag *next = tag;
+
+    do {
+        assert(i < assoc);
+        // swap tags[i] and next
+        IICTag *tmp = tags[i];
+        tags[i] = next;
+        next = tmp;
+        ++i;
+    } while (next != tag);
+}
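+
+/*
+ * Illustrative trace (not part of the original source): for an assumed
+ * 4-way set ordered MRU to LRU as [A, B, C, D], moveToHead(C) rotates
+ * the entries ahead of C down one slot, yielding [C, A, B, D]; D is
+ * untouched.
+ */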
+
+void
+IICSet::moveToTail(IICTag *tag)
+{
+    if (tags[assoc-1] == tag)
+        return;
+
+    // write 'next' tag into tags[i], moving from the LRU end toward MRU
+    // until we overwrite the tag we moved to tail.
+
+    // start by setting up to write 'tag' into tags[assoc-1]
+    int i = assoc - 1;
+    IICTag *next = tag;
+
+    do {
+        assert(i >= 0);
+        // swap tags[i] and next
+        IICTag *tmp = tags[i];
+        tags[i] = next;
+        next = tmp;
+        --i;
+    } while (next != tag);
+}
+
+void
+IIC::tagSwap(unsigned long index1, unsigned long index2)
+{
+    DPRINTF(IIC,"Swapping tag[%d]=%x for tag[%d]=%x\n",index1,
+            tagStore[index1].tag<<tagShift, index2,
+            tagStore[index2].tag<<tagShift);
+    IICTag tmp_tag;
+    tmp_tag = tagStore[index1];
+    tagStore[index1] = tagStore[index2];
+    tagStore[index2] = tmp_tag;
+    if (tagStore[index1].isValid())
+        repl->fixTag(tagStore[index1].re, index2, index1);
+    if (tagStore[index2].isValid())
+        repl->fixTag(tagStore[index2].re, index1, index2);
+}
+
+
+IICTag *
+IIC::secondaryChain(int asid, Addr tag, unsigned long chain_ptr,
+                    int *_depth) const
+{
+    int depth = 0;
+    while (chain_ptr != tagNull) {
+        DPRINTF(IIC,"Searching secondary at %d for %x\n", chain_ptr,
+                tag<<tagShift);
+        if (tagStore[chain_ptr].tag == tag &&
+            tagStore[chain_ptr].asid == asid &&
+            (tagStore[chain_ptr].isValid())) {
+            *_depth = depth;
+            return &tagStore[chain_ptr];
+        }
+        depth++;
+        chain_ptr = tagStore[chain_ptr].chain_ptr;
+    }
+    *_depth = depth;
+    return NULL;
+}
+
+void
+IIC::decompressBlock(unsigned long index)
+{
+    IICTag *tag_ptr = &tagStore[index];
+    if (tag_ptr->isCompressed()) {
+        // decompress the data here.
+    }
+}
+
+void
+IIC::compressBlock(unsigned long index)
+{
+    IICTag *tag_ptr = &tagStore[index];
+    if (!tag_ptr->isCompressed()) {
+        // Compress the data here.
+    }
+}
+
+void
+IIC::invalidateBlk(int asid, Addr addr)
+{
+    IICTag* tag_ptr = findBlock(addr, asid);
+    if (tag_ptr) {
+        for (int i = 0; i < tag_ptr->numData; ++i) {
+            dataReferenceCount[tag_ptr->data_ptr[i]]--;
+            if (dataReferenceCount[tag_ptr->data_ptr[i]] == 0) {
+                freeDataBlock(tag_ptr->data_ptr[i]);
+            }
+        }
+        repl->removeEntry(tag_ptr->re);
+        freeTag(tag_ptr);
+    }
+}
+
+void
+IIC::readData(IICTag *blk, uint8_t *data)
+{
+    assert(cache->doData());
+    assert(blk->size <= trivialSize || blk->numData > 0);
+    int data_size = blk->size;
+    if (data_size > trivialSize) {
+        for (int i = 0; i < blk->numData; ++i){
+            memcpy(data+i*subSize,
+                   &(dataBlks[blk->data_ptr[i]][0]),
+                   (data_size>subSize)?subSize:data_size);
+            data_size -= subSize;
+        }
+    } else {
+        memcpy(data,blk->trivialData,data_size);
+    }
+}
+
+void
+IIC::writeData(IICTag *blk, uint8_t *write_data, int size,
+               PacketList* & writebacks)
+{
+    assert(cache->doData());
+    assert(size < blkSize || !blk->isCompressed());
+    DPRINTF(IIC, "Writing %d bytes to %x\n", size,
+            blk->tag<<tagShift);
+    // Find the number of subblocks needed, (round up)
+    int num_subs = (size + (subSize -1))/subSize;
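+    // e.g. (illustrative) with an assumed subSize of 16, a 40-byte write
+    // needs (40 + 15)/16 = 3 subblocks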
+    if (size <= trivialSize) {
+        num_subs = 0;
+    }
+    assert(num_subs <= numSub);
+    if (num_subs > blk->numData) {
+        // need to allocate more data blocks
+        for (int i = blk->numData; i < num_subs; ++i){
+            blk->data_ptr[i] = getFreeDataBlock(writebacks);
+            dataReferenceCount[blk->data_ptr[i]] += 1;
+        }
+    } else if (num_subs < blk->numData){
+        // can free data blocks
+        for (int i=num_subs; i < blk->numData; ++i){
+            // decrement reference count and compare to zero
+            /**
+             * @todo
+             * Make this work with copying.
+             */
+            if (--dataReferenceCount[blk->data_ptr[i]] == 0) {
+                freeDataBlock(blk->data_ptr[i]);
+            }
+        }
+    }
+
+    blk->numData = num_subs;
+    blk->size = size;
+    assert(size <= trivialSize || blk->numData > 0);
+    if (size > trivialSize){
+        for (int i = 0; i < blk->numData; ++i){
+            memcpy(&dataBlks[blk->data_ptr[i]][0], write_data + i*subSize,
+                   (size>subSize)?subSize:size);
+            size -= subSize;
+        }
+    } else {
+        memcpy(blk->trivialData,write_data,size);
+    }
+}
+
+
+/**
+ * @todo This code can break if the src is evicted to get a tag for the dest.
+ */
+void
+IIC::doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks)
+{
+    IICTag *dest_tag = findBlock(dest, asid);
+
+    if (dest_tag) {
+        for (int i = 0; i < dest_tag->numData; ++i) {
+            if (--dataReferenceCount[dest_tag->data_ptr[i]] == 0) {
+                freeDataBlock(dest_tag->data_ptr[i]);
+            }
+        }
+        // Reset replacement entry
+    } else {
+        dest_tag = getFreeTag(hash(dest), writebacks);
+        dest_tag->re = (void*) repl->add(dest_tag - tagStore);
+        dest_tag->set = hash(dest);
+        dest_tag->tag = extractTag(dest);
+        dest_tag->req->asid = asid;
+        dest_tag->status = BlkValid | BlkWritable;
+    }
+    // Find the source tag here since it might move if we need to find a
+    // tag for the destination.
+    IICTag *src_tag = findBlock(source, asid);
+    assert(src_tag);
+    assert(!cache->doData() || src_tag->size <= trivialSize
+           || src_tag->numData > 0);
+    // point dest to source data and inc counter
+    for (int i = 0; i < src_tag->numData; ++i) {
+        dest_tag->data_ptr[i] = src_tag->data_ptr[i];
+        ++dataReferenceCount[dest_tag->data_ptr[i]];
+    }
+
+    // Maintain fast access data.
+    memcpy(dest_tag->data, src_tag->data, blkSize);
+
+    dest_tag->xc = src_tag->xc;
+    dest_tag->size = src_tag->size;
+    dest_tag->numData = src_tag->numData;
+    if (src_tag->numData == 0) {
+        // Data is stored in the trivial data, just copy it.
+        memcpy(dest_tag->trivialData, src_tag->trivialData, src_tag->size);
+    }
+
+    dest_tag->status |= BlkDirty;
+    if (dest_tag->size < blkSize) {
+        dest_tag->status |= BlkCompressed;
+    } else {
+        dest_tag->status &= ~BlkCompressed;
+    }
+}
+
+void
+IIC::fixCopy(Packet * &pkt, PacketList* &writebacks)
+{
+    // if reference counter is greater than 1, do copy
+    // else do write
+    Addr blk_addr = blkAlign(pkt->paddr);
+    IICTag* blk = findBlock(blk_addr, pkt->req->asid);
+
+    if (blk->numData > 0 && dataReferenceCount[blk->data_ptr[0]] != 1) {
+        // copy the data
+        // Mark the block as referenced so it doesn't get replaced.
+        blk->status |= BlkReferenced;
+        for (int i = 0; i < blk->numData; ++i){
+            unsigned long new_data = getFreeDataBlock(writebacks);
+            // Need to refresh pointer
+            /**
+             * @todo Remove this refetch once we change IIC to pointer based
+             */
+            blk = findBlock(blk_addr, pkt->req->asid);
+            assert(blk);
+            if (cache->doData()) {
+                memcpy(&(dataBlks[new_data][0]),
+                       &(dataBlks[blk->data_ptr[i]][0]),
+                       subSize);
+            }
+            dataReferenceCount[blk->data_ptr[i]]--;
+            dataReferenceCount[new_data]++;
+            blk->data_ptr[i] = new_data;
+        }
+    }
+}
+
+void
+IIC::cleanupRefs()
+{
+    for (int i = 0; i < numTags; ++i) {
+        if (tagStore[i].isValid()) {
+            totalRefs += tagStore[i].refCount;
+            ++sampledRefs;
+        }
+    }
+}
diff --git a/src/mem/cache/tags/iic.hh b/src/mem/cache/tags/iic.hh
new file mode 100644 (file)
index 0000000..ef3f03c
--- /dev/null
@@ -0,0 +1,574 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declaration of the Indirect Index Cache (IIC) tag store.
+ */
+
+#ifndef __IIC_HH__
+#define __IIC_HH__
+
+#include <list>
+#include <vector>
+
+#include "mem/cache/cache_blk.hh"
+#include "mem/cache/tags/repl/repl.hh"
+#include "mem/packet.hh"
+#include "base/statistics.hh"
+#include "mem/cache/tags/base_tags.hh"
+
+class BaseCache; // Forward declaration
+
+/**
+ * IIC cache blk.
+ */
+class IICTag : public CacheBlk
+{
+  public:
+    /**
+     * Copy the contents of the given IICTag into this one.
+     * @param rhs The tag to copy.
+     * @return const reference to this tag.
+     */
+    const IICTag& operator=(const IICTag& rhs)
+    {
+        CacheBlk::operator=(rhs);
+        chain_ptr = rhs.chain_ptr;
+        re = rhs.re;
+        set = rhs.set;
+        trivialData = rhs.trivialData;
+        numData = rhs.numData;
+        data_ptr.clear();
+        for (int i = 0; i < rhs.numData; ++i) {
+            data_ptr.push_back(rhs.data_ptr[i]);
+        }
+        return *this;
+    }
+
+    /** Hash chain pointer into secondary store. */
+    unsigned long chain_ptr;
+    /** Data array pointers for each subblock. */
+    std::vector<unsigned long> data_ptr;
+    /** Replacement Entry pointer. */
+    void *re;
+    /**
+     * An array to store small compressed data. Conceptually the same size
+     * as the unused data array pointers.
+     */
+    uint8_t *trivialData;
+    /**
+     * The number of allocated subblocks.
+     */
+    int numData;
+};
+
+/**
+ * A hash set for the IIC primary lookup table.
+ */
+class IICSet{
+  public:
+    /** The associativity of the primary table. */
+    int assoc;
+
+    /** The number of hash chains followed when finding the last block. */
+    int depth;
+    /** The current number of blocks on the chain. */
+    int size;
+
+    /** Tag pointer into the secondary tag storage. */
+    unsigned long chain_ptr;
+
+    /** The LRU list of the primary table. MRU is at 0 index. */
+    IICTag ** tags;
+
+    /**
+     * Find the addr in this set, return the chain pointer to the secondary if
+     * it isn't found.
+     * @param asid The address space ID.
+     * @param tag The address to find.
+     * @param chain_ptr The chain pointer to start the search of the secondary
+     * @return Pointer to the tag, NULL if not found.
+     */
+    IICTag* findTag(int asid, Addr tag, unsigned long &chain_ptr)
+    {
+        depth = 1;
+        for (int i = 0; i < assoc; ++i) {
+            if (tags[i]->tag == tag && tags[i]->isValid()) {
+                return tags[i];
+            }
+        }
+        chain_ptr = this->chain_ptr;
+        return 0;
+    }
+
+    /**
+     * Find an unused tag in this set.
+     * @return Pointer to the unused tag, NULL if none are free.
+     */
+    IICTag* findFree()
+    {
+        for (int i = 0; i < assoc; ++i) {
+            if (!tags[i]->isValid()) {
+                return tags[i];
+            }
+        }
+        return 0;
+    }
+
+    /**
+     * Move a tag to the head of the LRU list
+     * @param tag The tag to move.
+     */
+    void moveToHead(IICTag *tag);
+
+    /**
+     * Move a tag to the tail (LRU) of the LRU list
+     * @param tag The tag to move.
+     */
+    void moveToTail(IICTag *tag);
+};
+
+/**
+ * The IIC tag store. This is a hardware-realizable, fully-associative tag
+ * store that uses software replacement, e.g. Gen.
+ */
+class IIC : public BaseTags
+{
+  public:
+    /** Typedef of the block type used in this class. */
+    typedef IICTag BlkType;
+    /** Typedef for list of pointers to the local block type. */
+    typedef std::list<IICTag*> BlkList;
+  protected:
+    /** The number of sets in the primary table. */
+    const int hashSets;
+    /** The block size in bytes. */
+    const int blkSize;
+    /** The associativity of the primary table. */
+    const int assoc;
+    /** The base hit latency. */
+    const int hitLatency;
+    /** The subblock size, used for compression. */
+    const int subSize;
+
+    /** The number of subblocks. */
+    const int numSub;
+    /** The number of bytes used by data pointers. */
+    const int trivialSize;
+
+    /** The amount to shift address to get the tag. */
+    const int tagShift;
+    /** The mask to get block offset bits. */
+    const unsigned blkMask;
+
+    /** The amount to shift to get the subblock number. */
+    const int subShift;
+    /** The mask to get the correct subblock number. */
+    const unsigned subMask;
+
+    /** The latency of a hash lookup. */
+    const int hashDelay;
+    /** The number of data blocks. */
+    const int numBlocks;
+    /** The total number of tags in primary and secondary. */
+    const int numTags;
+    /** The number of tags in the secondary tag store. */
+    const int numSecondary;
+
+    /** The Null tag pointer. */
+    const int tagNull;
+    /** The last tag in the primary table. */
+    const int primaryBound;
+
+    /** All of the tags */
+    IICTag *tagStore;
+    /**
+     * Pointer to the head of the secondary freelist (maintained with chain
+     * pointers.
+     */
+    unsigned long freelist;
+    /**
+     * The data block freelist.
+     */
+    std::list<unsigned long> blkFreelist;
+
+    /** The primary table. */
+    IICSet *sets;
+
+    /** The replacement policy. */
+    Repl *repl;
+
+    /** An array of data reference counters. */
+    int *dataReferenceCount;
+
+    /** The data blocks. */
+    uint8_t *dataStore;
+
+    /** Storage for the fast access data of each cache block. */
+    uint8_t **dataBlks;
+
+    /**
+     * Count of the current number of free secondary tags.
+     * Used for debugging.
+     */
+    int freeSecond;
+
+    // IIC Statistics
+    /**
+     * @addtogroup IICStatistics IIC Statistics
+     * @{
+     */
+
+    /** Hash hit depth of cache hits. */
+    Stats::Distribution<> hitHashDepth;
+    /** Hash depth for cache misses. */
+    Stats::Distribution<> missHashDepth;
+    /** Count of accesses to each hash set. */
+    Stats::Distribution<> setAccess;
+
+    /** The total hash depth for every miss. */
+    Stats::Scalar<> missDepthTotal;
+    /** The total hash depth for all hits. */
+    Stats::Scalar<> hitDepthTotal;
+    /** The number of hash misses. */
+    Stats::Scalar<> hashMiss;
+    /** The number of hash hits. */
+    Stats::Scalar<> hashHit;
+    /** @} */
+
+  public:
+    /**
+     * Collection of parameters for the IIC.
+     */
+    class Params {
+      public:
+        /** The size in bytes of the cache. */
+        int size;
+        /** The number of sets in the primary table. */
+        int numSets;
+        /** The block size in bytes. */
+        int blkSize;
+        /** The associativity of the primary table. */
+        int assoc;
+        /** The number of cycles for each hash lookup. */
+        int hashDelay;
+        /** The number of cycles to read the data. */
+        int hitLatency;
+        /** The replacement policy. */
+        Repl *rp;
+        /** The subblock size in bytes. */
+        int subblockSize;
+    };
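+
+    /*
+     * Illustrative sketch (not part of the original source): filling in
+     * the parameters and constructing a tag store. The concrete values
+     * and the replPolicy pointer are assumptions for illustration only.
+     *
+     *     IIC::Params p;
+     *     p.size = 64 * 1024;   // 64KB of data storage
+     *     p.numSets = 256;      // primary hash sets, a power of 2
+     *     p.blkSize = 64;       // bytes per block, a power of 2
+     *     p.assoc = 4;          // primary-table associativity
+     *     p.hashDelay = 1;      // cycles per hash lookup
+     *     p.hitLatency = 2;     // base cycles for a hit
+     *     p.subblockSize = 16;  // must evenly divide blkSize
+     *     p.rp = replPolicy;    // some Repl* replacement policy
+     *     IIC *tags = new IIC(p);
+     */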
+
+    /**
+     * Construct and initialize this tag store.
+     * @param params The IIC parameters.
+     * @todo
+     * Should make a way to have fewer tags in the primary than blks in the
+     * cache. Also should be able to specify the number of secondary blks.
+     */
+    IIC(Params &params);
+
+    /**
+     * Destructor.
+     */
+    virtual ~IIC();
+
+    /**
+     * Register the statistics.
+     * @param name The name to prepend to the statistic descriptions.
+     */
+    void regStats(const std::string &name);
+
+    /**
+     * Regenerate the block address from the tag.
+     * @param tag The tag of the block.
+     * @param set Not needed for the IIC.
+     * @return The block address.
+     */
+    Addr regenerateBlkAddr(Addr tag, int set) {
+        return (((Addr)tag << tagShift));
+    }
+
+    /**
+     * Return the block size.
+     * @return The block size.
+     */
+    int getBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Return the subblock size.
+     * @return The subblock size.
+     */
+    int getSubBlockSize()
+    {
+        return subSize;
+    }
+
+    /**
+     * Return the hit latency.
+     * @return the hit latency.
+     */
+    int getHitLatency() const
+    {
+        return hitLatency;
+    }
+
+    /**
+     * Generate the tag from the address.
+     * @param addr The address to get a tag for.
+     * @param blk Ignored here.
+     * @return the tag.
+     */
+    Addr extractTag(Addr addr, IICTag *blk) const
+    {
+        return (addr >> tagShift);
+    }
+
+    /**
+     * Generate the tag from the address.
+     * @param addr The address to get a tag for.
+     * @return the tag.
+     */
+    Addr extractTag(Addr addr) const
+    {
+        return (addr >> tagShift);
+    }
+
+    /**
+     * Return the set, always 0 for IIC.
+     * @return 0.
+     */
+    int extractSet(Addr addr) const
+    {
+        return 0;
+    }
+
+    /**
+     * Get the block offset of an address.
+     * @param addr The address to get the offset of.
+     * @return the block offset of the address.
+     */
+    int extractBlkOffset(Addr addr) const
+    {
+        return (addr & blkMask);
+    }
+
+    /**
+     * Align an address to the block size.
+     * @param addr the address to align.
+     * @return The block address.
+     */
+    Addr blkAlign(Addr addr) const
+    {
+        return (addr & ~(Addr)blkMask);
+    }
+
+    /**
+     * Check for the address in the tagstore.
+     * @param asid The address space ID.
+     * @param addr The address to find.
+     * @return true if it is found.
+     */
+    bool probe(int asid, Addr addr) const;
+
+    /**
+     * Swap the position of two tags.
+     * @param index1 The first tag location.
+     * @param index2 The second tag location.
+     */
+    void tagSwap(unsigned long index1, unsigned long index2);
+
+    /**
+     * Clear the reference bit of the tag and return its old value.
+     * @param index The pointer of the tag to manipulate.
+     * @return The previous state of the reference bit.
+     */
+    bool clearRef(unsigned long index)
+    {
+        bool tmp = tagStore[index].isReferenced();
+        tagStore[index].status &= ~BlkReferenced;
+        return tmp;
+    }
+
+    /**
+     * Decompress a block if it is compressed.
+     * @param index The tag store index for the block to uncompress.
+     */
+    void decompressBlock(unsigned long index);
+
+    /**
+     * Try and compress a block if it is not already compressed.
+     * @param index The tag store index for the block to compress.
+     */
+    void compressBlock(unsigned long index);
+
+    /**
+     * Invalidate the block containing the address.
+     * @param asid The address space ID.
+     * @param addr The address to invalidate.
+     */
+    void invalidateBlk(int asid, Addr addr);
+
+    /**
+     * Find the block and update the replacement data. This call also returns
+     * the access latency as a side effect.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @param lat The access latency.
+     * @return A pointer to the block found, if any.
+     */
+    IICTag* findBlock(Addr addr, int asid, int &lat);
+
+    /**
+     * Find the block and update the replacement data. This call also returns
+     * the access latency as a side effect.
+     * @param pkt The packet whose block to find.
+     * @param lat The access latency.
+     * @return A pointer to the block found, if any.
+     */
+    IICTag* findBlock(Packet * &pkt, int &lat);
+
+    /**
+     * Find the block, do not update the replacement data.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @return A pointer to the block found, if any.
+     */
+    IICTag* findBlock(Addr addr, int asid) const;
+
+    /**
+     * Find a replacement block for the address provided.
+     * @param pkt The packet to find a replacement candidate for.
+     * @param writebacks List for any writebacks to be performed.
+     * @param compress_blocks List of blocks to compress, for adaptive comp.
+     * @return The block to place the replacement in.
+     */
+    IICTag* findReplacement(Packet * &pkt, PacketList* &writebacks,
+                            BlkList &compress_blocks);
+
+    /**
+     * Read the data from the internal storage of the given cache block.
+     * @param blk The block to read the data from.
+     * @param data The buffer to read the data into.
+     */
+    void readData(IICTag *blk, uint8_t *data);
+
+    /**
+     * Write the data into the internal storage of the given cache block.
+     * @param blk The block to write to.
+     * @param data The data to write.
+     * @param size The number of bytes to write.
+     * @param writebacks A list for any writebacks to be performed. May be
+     * needed when writing to a compressed block.
+     */
+    void writeData(IICTag *blk, uint8_t *data, int size,
+                   PacketList* & writebacks);
+
+    /**
+     * Perform a block-aligned copy from the source address to the destination.
+     * @param source The block-aligned source address.
+     * @param dest The block-aligned destination address.
+     * @param asid The address space ID.
+     * @param writebacks List for any generated writeback requests.
+     */
+    void doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks);
+
+    /**
+     * If a block is currently marked copy on write, copy it before writing.
+     * @param req The write request.
+     * @param writebacks List for any generated writeback requests.
+     */
+    void fixCopy(Packet * &pkt, PacketList* &writebacks);
+
+    /**
+     * Called at end of simulation to complete average block reference stats.
+     */
+    virtual void cleanupRefs();
+  private:
+    /**
+     * Return the hash of the address.
+     * @param addr The address to hash.
+     * @return the hash of the address.
+     */
+    unsigned hash(Addr addr) const;
+
+    /**
+     * Search for a block in the secondary tag store. Returns the number of
+     * hash lookups as a side effect.
+     * @param asid The address space ID.
+     * @param tag The tag to match.
+     * @param chain_ptr The first entry to search.
+     * @param depth The number of hash lookups made while searching.
+     * @return A pointer to the block if found.
+     */
+    IICTag *secondaryChain(int asid, Addr tag, unsigned long chain_ptr,
+                            int *depth) const;
+
+    /**
+     * Free the resources associated with the next replacement block.
+     * @param writebacks A list of any writebacks to perform.
+     */
+    void freeReplacementBlock(PacketList* & writebacks);
+
+    /**
+     * Return the pointer to a free data block.
+     * @param writebacks A list of any writebacks to perform.
+     * @return A pointer to a free data block.
+     */
+    unsigned long getFreeDataBlock(PacketList* & writebacks);
+
+    /**
+     * Get a free tag in the given hash set.
+     * @param set The hash set to search.
+     * @param writebacks A list of any writebacks to perform.
+     * @return a pointer to a free tag.
+     */
+    IICTag* getFreeTag(int set, PacketList* & writebacks);
+
+    /**
+     * Free the resources associated with the given tag.
+     * @param tag_ptr The tag to free.
+     */
+    void freeTag(IICTag *tag_ptr);
+
+    /**
+     * Mark the given data block as being available.
+     * @param data_ptr The data block to free.
+     */
+    void freeDataBlock(unsigned long data_ptr);
+};
+#endif // __IIC_HH__
+
diff --git a/src/mem/cache/tags/lru.cc b/src/mem/cache/tags/lru.cc
new file mode 100644 (file)
index 0000000..0fe88fd
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Definitions of LRU tag store.
+ */
+
+#include <string>
+
+#include "mem/cache/base_cache.hh"
+#include "base/intmath.hh"
+#include "mem/cache/tags/lru.hh"
+#include "sim/root.hh"
+
+using namespace std;
+
+LRUBlk*
+CacheSet::findBlk(int asid, Addr tag) const
+{
+    for (int i = 0; i < assoc; ++i) {
+        if (blks[i]->tag == tag && blks[i]->isValid()) {
+            return blks[i];
+        }
+    }
+    return 0;
+}
+
+
+void
+CacheSet::moveToHead(LRUBlk *blk)
+{
+    // nothing to do if blk is already head
+    if (blks[0] == blk)
+        return;
+
+    // write 'next' block into blks[i], moving up from MRU toward LRU
+    // until we overwrite the block we moved to head.
+
+    // start by setting up to write 'blk' into blks[0]
+    int i = 0;
+    LRUBlk *next = blk;
+
+    do {
+        assert(i < assoc);
+        // swap blks[i] and next
+        LRUBlk *tmp = blks[i];
+        blks[i] = next;
+        next = tmp;
+        ++i;
+    } while (next != blk);
+}
+
+
+// create and initialize a LRU/MRU cache structure
+LRU::LRU(int _numSets, int _blkSize, int _assoc, int _hit_latency) :
+    numSets(_numSets), blkSize(_blkSize), assoc(_assoc), hitLatency(_hit_latency)
+{
+    // Check parameters
+    if (blkSize < 4 || !isPowerOf2(blkSize)) {
+        fatal("Block size must be at least 4 and a power of 2");
+    }
+    if (numSets <= 0 || !isPowerOf2(numSets)) {
+        fatal("# of sets must be non-zero and a power of 2");
+    }
+    if (assoc <= 0) {
+        fatal("associativity must be greater than zero");
+    }
+    if (hitLatency <= 0) {
+        fatal("access latency must be greater than zero");
+    }
+
+    LRUBlk  *blk;
+    int i, j, blkIndex;
+
+    blkMask = blkSize - 1;
+    setShift = floorLog2(blkSize);
+    setMask = numSets - 1;
+    tagShift = setShift + floorLog2(numSets);
+    warmedUp = false;
+    /** @todo Make warmup percentage a parameter. */
+    warmupBound = numSets * assoc;
+
+    sets = new CacheSet[numSets];
+    blks = new LRUBlk[numSets * assoc];
+    // allocate data storage in one big chunk
+    dataBlks = new uint8_t[numSets*assoc*blkSize];
+
+    blkIndex = 0;      // index into blks array
+    for (i = 0; i < numSets; ++i) {
+        sets[i].assoc = assoc;
+
+        sets[i].blks = new LRUBlk*[assoc];
+
+        // link in the data blocks
+        for (j = 0; j < assoc; ++j) {
+            // locate next cache block
+            blk = &blks[blkIndex];
+            blk->data = &dataBlks[blkSize*blkIndex];
+            ++blkIndex;
+
+            // invalidate new cache block
+            blk->status = 0;
+
+            //EGH Fix Me : do we need to initialize blk?
+
+            // Setting the tag to j is just to prevent long chains in the hash
+            // table; won't matter because the block is invalid
+            blk->tag = j;
+            blk->whenReady = 0;
+            blk->req->asid = -1;
+            blk->isTouched = false;
+            blk->size = blkSize;
+            sets[i].blks[j]=blk;
+            blk->set = i;
+        }
+    }
+}
+
+LRU::~LRU()
+{
+    delete [] dataBlks;
+    delete [] blks;
+    delete [] sets;
+}
+
+// probe cache for presence of given block.
+bool
+LRU::probe(int asid, Addr addr) const
+{
+    //  return(findBlock(Read, addr, asid) != 0);
+    Addr tag = extractTag(addr);
+    unsigned myset = extractSet(addr);
+
+    LRUBlk *blk = sets[myset].findBlk(asid, tag);
+
+    return (blk != NULL);      // true if in cache
+}
+
+LRUBlk*
+LRU::findBlock(Addr addr, int asid, int &lat)
+{
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    LRUBlk *blk = sets[set].findBlk(asid, tag);
+    lat = hitLatency;
+    if (blk != NULL) {
+        // move this block to head of the MRU list
+        sets[set].moveToHead(blk);
+        if (blk->whenReady > curTick
+            && blk->whenReady - curTick > hitLatency) {
+            lat = blk->whenReady - curTick;
+        }
+        blk->refCount += 1;
+    }
+
+    return blk;
+}
+
+LRUBlk*
+LRU::findBlock(Packet * &pkt, int &lat)
+{
+    Addr addr = pkt->paddr;
+    int asid = pkt->req->asid;
+
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    LRUBlk *blk = sets[set].findBlk(asid, tag);
+    lat = hitLatency;
+    if (blk != NULL) {
+        // move this block to head of the MRU list
+        sets[set].moveToHead(blk);
+        if (blk->whenReady > curTick
+            && blk->whenReady - curTick > hitLatency) {
+            lat = blk->whenReady - curTick;
+        }
+        blk->refCount += 1;
+    }
+
+    return blk;
+}
+
+LRUBlk*
+LRU::findBlock(Addr addr, int asid) const
+{
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    LRUBlk *blk = sets[set].findBlk(asid, tag);
+    return blk;
+}
+
+LRUBlk*
+LRU::findReplacement(Packet * &pkt, PacketList* &writebacks,
+                     BlkList &compress_blocks)
+{
+    unsigned set = extractSet(pkt->paddr);
+    // grab a replacement candidate
+    LRUBlk *blk = sets[set].blks[assoc-1];
+    sets[set].moveToHead(blk);
+    if (blk->isValid()) {
+        int thread_num = (blk->xc) ? blk->xc->getThreadNum() : 0;
+        replacements[thread_num]++;
+        totalRefs += blk->refCount;
+        ++sampledRefs;
+        blk->refCount = 0;
+    } else if (!blk->isTouched) {
+        tagsInUse++;
+        blk->isTouched = true;
+        if (!warmedUp && tagsInUse.value() >= warmupBound) {
+            warmedUp = true;
+            warmupCycle = curTick;
+        }
+    }
+
+    return blk;
+}
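+
+/*
+ * Illustrative trace (not part of the original source): for an assumed
+ * 4-way set ordered MRU to LRU as [A, B, C, D], the replacement candidate
+ * is D (blks[assoc-1]); moveToHead then reorders the set to [D, A, B, C]
+ * so the incoming block starts out as the MRU entry.
+ */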
+
+void
+LRU::invalidateBlk(int asid, Addr addr)
+{
+    LRUBlk *blk = findBlock(addr, asid);
+    if (blk) {
+        blk->status = 0;
+        blk->isTouched = false;
+        tagsInUse--;
+    }
+}
+
+void
+LRU::doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks)
+{
+    assert(source == blkAlign(source));
+    assert(dest == blkAlign(dest));
+    LRUBlk *source_blk = findBlock(source, asid);
+    assert(source_blk);
+    LRUBlk *dest_blk = findBlock(dest, asid);
+    if (dest_blk == NULL) {
+        // Need to do a replacement
+        Packet * pkt = new Packet();
+        pkt->paddr = dest;
+        BlkList dummy_list;
+        dest_blk = findReplacement(pkt, writebacks, dummy_list);
+        if (dest_blk->isValid() && dest_blk->isModified()) {
+            // Need to writeback data.
+            pkt = buildWritebackReq(regenerateBlkAddr(dest_blk->tag,
+                                                      dest_blk->set),
+                                    dest_blk->req->asid,
+                                    dest_blk->xc,
+                                    blkSize,
+                                    (cache->doData())?dest_blk->data:0,
+                                    dest_blk->size);
+            writebacks.push_back(pkt);
+        }
+        dest_blk->tag = extractTag(dest);
+        dest_blk->req->asid = asid;
+        /**
+         * @todo Do we need to pass in the execution context, or can we
+         * assume its the same?
+         */
+        assert(source_blk->xc);
+        dest_blk->xc = source_blk->xc;
+    }
+    /**
+     * @todo Can't assume the status once we have coherence on copies.
+     */
+
+    // Set this block as valid, writable, and dirty.
+    dest_blk->status = BlkValid | BlkWritable | BlkDirty;
+    if (cache->doData()) {
+        memcpy(dest_blk->data, source_blk->data, blkSize);
+    }
+}
+
+void
+LRU::cleanupRefs()
+{
+    for (int i = 0; i < numSets*assoc; ++i) {
+        if (blks[i].isValid()) {
+            totalRefs += blks[i].refCount;
+            ++sampledRefs;
+        }
+    }
+}
diff --git a/src/mem/cache/tags/lru.hh b/src/mem/cache/tags/lru.hh
new file mode 100644 (file)
index 0000000..9b4a557
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2003-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declaration of a LRU tag store.
+ */
+
+#ifndef __LRU_HH__
+#define __LRU_HH__
+
+#include <list>
+
+#include "mem/cache/cache_blk.hh" // base class
+#include "mem/packet.hh" // for inlined functions
+#include <assert.h>
+#include "mem/cache/tags/base_tags.hh"
+
+class BaseCache;
+
+/**
+ * LRU cache block.
+ */
+class LRUBlk : public CacheBlk {
+  public:
+    /** Has this block been touched? Used to aid calculation of warmup time. */
+    bool isTouched;
+};
+
+/**
+ * An associative set of cache blocks.
+ */
+class CacheSet
+{
+  public:
+    /** The associativity of this set. */
+    int assoc;
+
+    /** Cache blocks in this set, maintained in LRU order 0 = MRU. */
+    LRUBlk **blks;
+
+    /**
+     * Find a block matching the tag in this set.
+     * @param asid The address space ID.
+     * @param tag The Tag to find.
+     * @return Pointer to the block if found.
+     */
+    LRUBlk* findBlk(int asid, Addr tag) const;
+
+    /**
+     * Move the given block to the head of the list.
+     * @param blk The block to move.
+     */
+    void moveToHead(LRUBlk *blk);
+};
+
+/**
+ * A LRU cache tag store.
+ */
+class LRU : public BaseTags
+{
+  public:
+    /** Typedef the block type used in this tag store. */
+    typedef LRUBlk BlkType;
+    /** Typedef for a list of pointers to the local block class. */
+    typedef std::list<LRUBlk*> BlkList;
+  protected:
+    /** The number of sets in the cache. */
+    const int numSets;
+    /** The number of bytes in a block. */
+    const int blkSize;
+    /** The associativity of the cache. */
+    const int assoc;
+    /** The hit latency. */
+    const int hitLatency;
+
+    /** The cache sets. */
+    CacheSet *sets;
+
+    /** The cache blocks. */
+    LRUBlk *blks;
+    /** The data blocks, 1 per cache block. */
+    uint8_t *dataBlks;
+
+    /** The amount to shift the address to get the set. */
+    int setShift;
+    /** The amount to shift the address to get the tag. */
+    int tagShift;
+    /** Mask out all bits that aren't part of the set index. */
+    unsigned setMask;
+    /** Mask out all bits that aren't part of the block offset. */
+    unsigned blkMask;
+
+  public:
+    /**
+     * Construct and initialize this tag store.
+     * @param _numSets The number of sets in the cache.
+     * @param _blkSize The number of bytes in a block.
+     * @param _assoc The associativity of the cache.
+     * @param _hit_latency The latency in cycles for a hit.
+     */
+    LRU(int _numSets, int _blkSize, int _assoc, int _hit_latency);
+
+    /**
+     * Destructor
+     */
+    virtual ~LRU();
+
+    /**
+     * Return the block size.
+     * @return the block size.
+     */
+    int getBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Return the subblock size. In the case of LRU it is always the block
+     * size.
+     * @return The block size.
+     */
+    int getSubBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Search for the address in the cache.
+     * @param asid The address space ID.
+     * @param addr The address to find.
+     * @return True if the address is in the cache.
+     */
+    bool probe(int asid, Addr addr) const;
+
+    /**
+     * Invalidate the block containing the given address.
+     * @param asid The address space ID.
+     * @param addr The address to invalidate.
+     */
+    void invalidateBlk(int asid, Addr addr);
+
+    /**
+     * Finds the given address in the cache and updates replacement data.
+     * Returns the access latency as a side effect.
+     * @param pkt The packet whose block to find.
+     * @param lat The access latency.
+     * @return Pointer to the cache block if found.
+     */
+    LRUBlk* findBlock(Packet * &pkt, int &lat);
+
+    /**
+     * Finds the given address in the cache and updates replacement data.
+     * Returns the access latency as a side effect.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @param lat The access latency.
+     * @return Pointer to the cache block if found.
+     */
+    LRUBlk* findBlock(Addr addr, int asid, int &lat);
+
+    /**
+     * Finds the given address in the cache without updating replacement data.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @return Pointer to the cache block if found.
+     */
+    LRUBlk* findBlock(Addr addr, int asid) const;
+
+    /**
+     * Find a replacement block for the address provided.
+     * @param pkt The packet to find a replacement candidate for.
+     * @param writebacks List for any writebacks to be performed.
+     * @param compress_blocks List of blocks to compress, for adaptive comp.
+     * @return The block to place the replacement in.
+     */
+    LRUBlk* findReplacement(Packet * &pkt, PacketList* &writebacks,
+                            BlkList &compress_blocks);
+
+    /**
+     * Generate the tag from the given address.
+     * @param addr The address to get the tag from.
+     * @return The tag of the address.
+     */
+    Addr extractTag(Addr addr) const
+    {
+        return (addr >> tagShift);
+    }
+
+    /**
+     * Generate the tag from the given address.
+     * @param addr The address to get the tag from.
+     * @param blk Ignored.
+     * @return The tag of the address.
+     */
+    Addr extractTag(Addr addr, LRUBlk *blk) const
+    {
+        return (addr >> tagShift);
+    }
+
+    /**
+     * Calculate the set index from the address.
+     * @param addr The address to get the set from.
+     * @return The set index of the address.
+     */
+    int extractSet(Addr addr) const
+    {
+        return ((addr >> setShift) & setMask);
+    }
+
+    /**
+     * Get the block offset from an address.
+     * @param addr The address to get the offset of.
+     * @return The block offset.
+     */
+    int extractBlkOffset(Addr addr) const
+    {
+        return (addr & blkMask);
+    }
+
+    /**
+     * Align an address to the block size.
+     * @param addr the address to align.
+     * @return The block address.
+     */
+    Addr blkAlign(Addr addr) const
+    {
+        return (addr & ~(Addr)blkMask);
+    }
+
+    /**
+     * Regenerate the block address from the tag.
+     * @param tag The tag of the block.
+     * @param set The set of the block.
+     * @return The block address.
+     */
+    Addr regenerateBlkAddr(Addr tag, unsigned set) const
+    {
+        return ((tag << tagShift) | ((Addr)set << setShift));
+    }
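+    // A worked example of the address decomposition above (illustrative
+    // only; the numbers are assumptions, not a configuration from the
+    // tree). For a hypothetical 32KB, 4-way cache with 64B blocks:
+    //   numSets  = 32768 / (4 * 64) = 128
+    //   setShift = log2(64) = 6,       setMask = 127 (0x7f)
+    //   tagShift = 6 + log2(128) = 13, blkMask = 63 (0x3f)
+    // For addr = 0x12345678:
+    //   extractSet(addr) = (0x12345678 >> 6) & 0x7f = 0x59
+    //   extractTag(addr) = 0x12345678 >> 13 = 0x91a2
+    //   regenerateBlkAddr(0x91a2, 0x59) = (0x91a2 << 13) | (0x59 << 6)
+    //                                   = 0x12345640 = blkAlign(0x12345678)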
+
+    /**
+     * Return the hit latency.
+     * @return the hit latency.
+     */
+    int getHitLatency() const
+    {
+        return hitLatency;
+    }
+
+    /**
+     * Read the data out of the internal storage of the given cache block.
+     * @param blk The cache block to read.
+     * @param data The buffer to read the data into.
+     */
+    void readData(LRUBlk *blk, uint8_t *data)
+    {
+        memcpy(data, blk->data, blk->size);
+    }
+
+    /**
+     * Write data into the internal storage of the given cache block. Since
+     * the LRU store does not store data specially, this just needs to update
+     * the size.
+     * @param blk The cache block to write.
+     * @param data The data to write.
+     * @param size The number of bytes to write.
+     * @param writebacks A list for any writebacks to be performed. May be
+     * needed when writing to a compressed block.
+     */
+    void writeData(LRUBlk *blk, uint8_t *data, int size,
+                   PacketList* & writebacks)
+    {
+        assert(size <= blkSize);
+        blk->size = size;
+    }
+
+    /**
+     * Perform a block aligned copy from the source address to the destination.
+     * @param source The block-aligned source address.
+     * @param dest The block-aligned destination address.
+     * @param asid The address space ID.
+     * @param writebacks List for any generated writeback requests.
+     */
+    void doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks);
+
+    /**
+     * No impl.
+     */
+    void fixCopy(Packet * &pkt, PacketList* &writebacks)
+    {
+    }
+
+    /**
+     * Called at end of simulation to complete average block reference stats.
+     */
+    virtual void cleanupRefs();
+};
+
+#endif
diff --git a/src/mem/cache/tags/repl/gen.cc b/src/mem/cache/tags/repl/gen.cc
new file mode 100644 (file)
index 0000000..ec1c2aa
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Steve Reinhardt
+ */
+
+/**
+ * @file
+ * Definitions of the Generational replacement policy.
+ */
+
+#include <string>
+
+#include "base/misc.hh"
+#include "mem/cache/tags/iic.hh"
+#include "mem/cache/tags/repl/gen.hh"
+#include "sim/builder.hh"
+#include "sim/host.hh"
+
+using namespace std;
+
+GenRepl::GenRepl(const string &_name,
+                 int _num_pools,
+                 int _fresh_res,
+                 int _pool_res) // fix this, should be set by cache
+    : Repl(_name)
+{
+    num_pools = _num_pools;
+    fresh_res = _fresh_res;
+    pool_res = _pool_res;
+    num_entries = 0;
+    num_pool_entries = 0;
+    misses = 0;
+    pools = new GenPool[num_pools+1];
+}
+
+GenRepl::~GenRepl()
+{
+    delete [] pools;
+}
+
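+// Victim selection: scan the normal pools from pool 0 (least protected)
+// upward, discarding invalidated entries along the way. An entry that has
+// been referenced since insertion gets a second chance and advances one
+// pool (the top normal pool recycles into itself); the first unreferenced
+// valid entry found is evicted.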
+unsigned long
+GenRepl::getRepl()
+{
+    unsigned long tmp;
+    GenReplEntry *re;
+    int i;
+    int num_seen = 0;
+    if (num_pool_entries <= 0) {
+        fatal("No blks available to replace");
+    }
+    num_entries--;
+    num_pool_entries--;
+    for (i = 0; i < num_pools; i++) {
+        while ((re = pools[i].pop())) {
+            num_seen++;
+            // Remove invalidated entries
+            if (!re->valid) {
+                delete re;
+                continue;
+            }
+            if (iic->clearRef(re->tag_ptr)) {
+                pools[(i + 1 == num_pools) ? i : i + 1].push(re, misses);
+            }
+            else {
+                tmp = re->tag_ptr;
+                delete re;
+
+                repl_pool.sample(i);
+
+                return tmp;
+            }
+        }
+    }
+    fatal("No replacement found");
+    return 0xffffffff;
+}
+
+unsigned long *
+GenRepl::getNRepl(int n)
+{
+    unsigned long *tmp;
+    GenReplEntry *re;
+    int i;
+    if (num_pool_entries < n) {
+        fatal("Not enough blks available to replace");
+    }
+    num_entries -= n;
+    num_pool_entries -= n;
+    tmp = new unsigned long[n]; /* array of cache_blk pointers */
+    int blk_index = 0;
+    for (i = 0; i < num_pools && blk_index < n; i++) {
+        while (blk_index < n && (re = pools[i].pop())) {
+            // Remove invalidated entries
+            if (!re->valid) {
+                delete re;
+                continue;
+            }
+            if (iic->clearRef(re->tag_ptr)) {
+                pools[(i + 1 == num_pools) ? i : i + 1].push(re, misses);
+            }
+            else {
+                tmp[blk_index] = re->tag_ptr;
+                blk_index++;
+                delete re;
+                repl_pool.sample(i);
+            }
+        }
+    }
+    if (blk_index >= n)
+        return tmp;
+    /** @todo Search the fresh pool for replacements as well. */
+
+    fatal("No N replacements found");
+    return NULL;
+}
+
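+// Aging on each miss: in every normal pool, entries that have overstayed
+// pool_res either advance one pool (if referenced since insertion) or
+// drop back one pool (if not). Entries that have sat in the fresh pool
+// (index num_pools) longer than fresh_res are moved into the middle
+// normal pools and begin counting toward num_pool_entries.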
+void
+GenRepl::doAdvance(std::list<unsigned long> &demoted)
+{
+    int i;
+    int num_seen = 0;
+    GenReplEntry *re;
+    misses++;
+    for (i=0; i<num_pools; i++) {
+        while (misses-pools[i].oldest > pool_res && (re = pools[i].pop())!=NULL) {
+            if (iic->clearRef(re->tag_ptr)) {
+                pools[(i + 1 == num_pools) ? i : i + 1].push(re, misses);
+                /** @todo Not really demoted, but use it for now. */
+                demoted.push_back(re->tag_ptr);
+                advance_pool.sample(i);
+            }
+            else {
+                pools[(i - 1 < 0) ? i : i - 1].push(re, misses);
+                demoted.push_back(re->tag_ptr);
+                demote_pool.sample(i);
+            }
+        }
+        num_seen += pools[i].size;
+    }
+    while (misses-pools[num_pools].oldest > fresh_res
+          && (re = pools[num_pools].pop())!=NULL) {
+        num_pool_entries++;
+        if (iic->clearRef(re->tag_ptr)) {
+            pools[num_pools/2].push(re, misses);
+            /** @todo Not really demoted, but use it for now. */
+            demoted.push_back(re->tag_ptr);
+            advance_pool.sample(num_pools);
+        }
+        else {
+            pools[num_pools/2-1].push(re, misses);
+            demoted.push_back(re->tag_ptr);
+            demote_pool.sample(num_pools);
+        }
+    }
+}
+
+void*
+GenRepl::add(unsigned long tag_index)
+{
+    GenReplEntry *re = new GenReplEntry;
+    re->tag_ptr = tag_index;
+    re->valid = true;
+    pools[num_pools].push(re, misses);
+    num_entries++;
+    return (void*)re;
+}
+
+void
+GenRepl::regStats(const string name)
+{
+    using namespace Stats;
+
+    /** GEN statistics */
+    repl_pool
+        .init(0, 16, 1)
+        .name(name + ".repl_pool_dist")
+        .desc("Dist. of Repl. across pools")
+        .flags(pdf)
+        ;
+
+    advance_pool
+        .init(0, 16, 1)
+        .name(name + ".advance_pool_dist")
+        .desc("Dist. of Repl. across pools")
+        .flags(pdf)
+        ;
+
+    demote_pool
+        .init(0, 16, 1)
+        .name(name + ".demote_pool_dist")
+        .desc("Dist. of Repl. across pools")
+        .flags(pdf)
+        ;
+}
+
+int
+GenRepl::fixTag(void* _re, unsigned long old_index, unsigned long new_index)
+{
+    GenReplEntry *re = (GenReplEntry*)_re;
+    assert(re->valid);
+    if (re->tag_ptr == old_index) {
+        re->tag_ptr = new_index;
+        return 1;
+    }
+    fatal("Repl entry: tag ptrs do not match");
+    return 0;
+}
+
+bool
+GenRepl::findTagPtr(unsigned long index)
+{
+    for (int i = 0; i < num_pools + 1; ++i) {
+        list<GenReplEntry*>::const_iterator iter = pools[i].entries.begin();
+        list<GenReplEntry*>::const_iterator end = pools[i].entries.end();
+        for (; iter != end; ++iter) {
+            if ((*iter)->valid && (*iter)->tag_ptr == index) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+BEGIN_DECLARE_SIM_OBJECT_PARAMS(GenRepl)
+
+    Param<int> num_pools;
+    Param<int> fresh_res;
+    Param<int> pool_res;
+
+END_DECLARE_SIM_OBJECT_PARAMS(GenRepl)
+
+
+BEGIN_INIT_SIM_OBJECT_PARAMS(GenRepl)
+
+    INIT_PARAM(num_pools, "number of replacement pools"),
+    INIT_PARAM(fresh_res, "fresh pool residency time (in misses)"),
+    INIT_PARAM(pool_res, "normal pool residency time (in misses)")
+
+END_INIT_SIM_OBJECT_PARAMS(GenRepl)
+
+
+CREATE_SIM_OBJECT(GenRepl)
+{
+    return new GenRepl(getInstanceName(), num_pools, fresh_res, pool_res);
+}
+
+REGISTER_SIM_OBJECT("GenRepl", GenRepl)
+
+#endif // DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/tags/repl/gen.hh b/src/mem/cache/tags/repl/gen.hh
new file mode 100644 (file)
index 0000000..c1ceb3f
--- /dev/null
@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ */
+
+/**
+ * @file
+ * Declarations of the generational replacement policy.
+ */
+
+#ifndef __GEN_HH__
+#define __GEN_HH__
+
+#include <list>
+
+#include "base/statistics.hh"
+#include "mem/cache/tags/repl/repl.hh"
+
+/**
+ * Generational Replacement entry.
+ */
+class GenReplEntry
+{
+  public:
+    /** Valid flag, used to quickly invalidate bogus entries. */
+    bool valid;
+    /** The difference between this entry and the previous in the pool. */
+    int delta;
+    /** Pointer to the corresponding tag in the IIC. */
+    unsigned long tag_ptr;
+};
+
+/**
+ * Generational replacement pool
+ */
+class GenPool
+{
+  public:
+    /** The time the last entry was added. */
+    Tick newest;
+    /** The time the oldest entry was added. */
+    Tick oldest;
+    /** List of the replacement entries in this pool. */
+    std::list<GenReplEntry*> entries;
+
+    /** The number of entries in this pool. */
+    int size;
+
+    /**
+     * Simple constructor.
+     */
+    GenPool() {
+        newest = 0;
+        oldest = 0;
+        size = 0;
+    }
+
+    /**
+     * Add an entry to this pool.
+     * @param re The entry to add.
+     * @param now The current time.
+     */
+    void push(GenReplEntry *re, Tick now) {
+        ++size;
+        if (!entries.empty()) {
+            re->delta = now - newest;
+            newest = now;
+        } else {
+            re->delta = 0;
+            newest = oldest = now;
+        }
+        entries.push_back(re);
+    }
+
+    /**
+     * Remove an entry from the pool.
+     * @return The entry at the front of the list.
+     */
+    GenReplEntry* pop() {
+        GenReplEntry *tmp = NULL;
+        if (!entries.empty()) {
+            --size;
+            tmp = entries.front();
+            entries.pop_front();
+            oldest += tmp->delta;
+        }
+        return tmp;
+    }
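+    // The delta fields let oldest advance without timestamping every entry:
+    // each entry records its arrival gap from its predecessor, and pop()
+    // adds that gap back onto oldest. E.g. (illustrative times only) pushes
+    // at t = 10, 13, 20 record deltas 0, 3, 7 and set oldest = 10; three
+    // pops then step oldest to 10, 13, and 20.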
+
+    /**
+     * Return the entry at the front of the list.
+     * @return the entry at the front of the list.
+     */
+    GenReplEntry* top() {
+        return entries.front();
+    }
+
+    /**
+     * Destructor.
+     */
+    ~GenPool() {
+        while (!entries.empty()) {
+            GenReplEntry *tmp = entries.front();
+            entries.pop_front();
+            delete tmp;
+        }
+    }
+};
+
+/**
+ * Generational replacement policy for use with the IIC.
+ * @todo update to use STL and for efficiency
+ */
+class GenRepl : public Repl
+{
+  public:
+    /** The array of pools. */
+    GenPool *pools;
+    /** The number of pools. */
+    int num_pools;
+    /** The amount of time to stay in the fresh pool. */
+    int fresh_res;
+    /** The amount of time to stay in the normal pools. */
+    int pool_res;
+    /** The maximum number of entries */
+    int num_entries;
+    /** The number of entries currently in the pools. */
+    int num_pool_entries;
+    /** The number of misses. Used as the internal time. */
+    Tick misses;
+
+    // Statistics
+
+    /**
+     * @addtogroup CacheStatistics
+     * @{
+     */
+    /** The number of replacements from each pool. */
+    Stats::Distribution<> repl_pool;
+    /** The number of advances out of each pool. */
+    Stats::Distribution<> advance_pool;
+    /** The number of demotions from each pool. */
+    Stats::Distribution<> demote_pool;
+    /**
+     * @}
+     */
+
+    /**
+     * Constructs and initializes this replacement policy.
+     * @param name The name of the policy.
+     * @param num_pools The number of pools to use.
+     * @param fresh_res The amount of time to wait in the fresh pool.
+     * @param pool_res The amount of time to wait in the normal pools.
+     */
+    GenRepl(const std::string &name, int num_pools,
+            int fresh_res, int pool_res);
+
+    /**
+     * Destructor.
+     */
+    ~GenRepl();
+
+    /**
+     * Returns the tag pointer of the cache block to replace.
+     * @return The tag to replace.
+     */
+    virtual unsigned long getRepl();
+
+    /**
+     * Return an array of N tag pointers to replace.
+     * @param n The number of tag pointer to return.
+     * @return An array of tag pointers to replace.
+     */
+    virtual unsigned long *getNRepl(int n);
+
+    /**
+     * Update the replacement data on a miss.
+     * @param demoted List to append the tag pointers of moved entries to.
+     */
+    virtual void doAdvance(std::list<unsigned long> &demoted);
+
+    /**
+     * Add a tag to the replacement policy and return a pointer to the
+     * replacement entry.
+     * @param tag_index The tag to add.
+     * @return The replacement entry.
+     */
+    virtual void* add(unsigned long tag_index);
+
+    /**
+     * Register statistics.
+     * @param name The name to prepend to statistic descriptions.
+     */
+    virtual void regStats(const std::string name);
+
+    /**
+     * Update the tag pointer when the tag moves.
+     * @param re The replacement entry of the tag.
+     * @param old_index The old tag pointer.
+     * @param new_index The new tag pointer.
+     * @return 1 if successful, 0 otherwise.
+     */
+    virtual int fixTag(void *re, unsigned long old_index,
+                       unsigned long new_index);
+
+    /**
+     * Remove this entry from the replacement policy.
+     * @param re The replacement entry to remove.
+     */
+    virtual void removeEntry(void *re)
+    {
+        ((GenReplEntry*)re)->valid = false;
+    }
+
+  protected:
+    /**
+     * Debug function to verify that there is only one repl entry per tag.
+     * @param index The tag index to check.
+     */
+    bool findTagPtr(unsigned long index);
+};
+
+#endif /* __GEN_HH__ */
diff --git a/src/mem/cache/tags/repl/repl.cc b/src/mem/cache/tags/repl/repl.cc
new file mode 100644 (file)
index 0000000..ce781eb
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Nathan Binkert
+ */
+
+/**
+ * @file
+ * Definitions of the base replacement class.
+ */
+
+#include "sim/param.hh"
+#include "mem/cache/tags/repl/repl.hh"
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+DEFINE_SIM_OBJECT_CLASS_NAME("Repl", Repl)
+
+#endif //DOXYGEN_SHOULD_SKIP_THIS
diff --git a/src/mem/cache/tags/repl/repl.hh b/src/mem/cache/tags/repl/repl.hh
new file mode 100644 (file)
index 0000000..7c289a5
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Erik Hallnor
+ *          Steve Reinhardt
+ *          Nathan Binkert
+ */
+
+/**
+ * @file
+ * Declaration of a base replacement policy class.
+ */
+
+#ifndef __REPL_HH__
+#define __REPL_HH__
+
+#include <string>
+#include <list>
+
+#include "cpu/smt.hh"
+#include "sim/host.hh"
+#include "sim/sim_object.hh"
+
+
+class IIC;
+
+/**
+ * A pure virtual base class that defines the interface of a replacement
+ * policy.
+ */
+class Repl : public SimObject
+{
+  public:
+    /** Pointer to the IIC using this policy. */
+    IIC *iic;
+
+    /**
+     * Construct and initialize this policy.
+     * @param name The instance name of this policy.
+     */
+    Repl (const std::string &name)
+        : SimObject(name)
+    {
+        iic = NULL;
+    }
+
+    /**
+     * Set the back pointer to the IIC.
+     * @param iic_ptr Pointer to the IIC.
+     */
+    void setIIC(IIC *iic_ptr)
+    {
+        iic = iic_ptr;
+    }
+
+    /**
+     * Returns the tag pointer of the cache block to replace.
+     * @return The tag to replace.
+     */
+    virtual unsigned long getRepl() = 0;
+
+    /**
+     * Return an array of N tag pointers to replace.
+     * @param n The number of tag pointers to return.
+     * @return An array of tag pointers to replace.
+     */
+    virtual unsigned long *getNRepl(int n) = 0;
+
+    /**
+     * Update the replacement data on a miss.
+     * @param demoted List to append the tag pointers of moved entries to.
+     */
+    virtual void doAdvance(std::list<unsigned long> &demoted) = 0;
+
+    /**
+     * Add a tag to the replacement policy and return a pointer to the
+     * replacement entry.
+     * @param tag_index The tag to add.
+     * @return The replacement entry.
+     */
+    virtual void* add(unsigned long tag_index) = 0;
+
+    /**
+     * Register statistics.
+     * @param name The name to prepend to statistic descriptions.
+     */
+    virtual void regStats(const std::string name) = 0;
+
+    /**
+     * Update the tag pointer when the tag moves.
+     * @param re The replacement entry of the tag.
+     * @param old_index The old tag pointer.
+     * @param new_index The new tag pointer.
+     * @return 1 if successful, 0 otherwise.
+     */
+    virtual int fixTag(void *re, unsigned long old_index,
+                       unsigned long new_index) = 0;
+
+    /**
+     * Remove this entry from the replacement policy.
+     * @param re The replacement entry to remove.
+     */
+    virtual void removeEntry(void *re) = 0;
+};
+
+#endif /* __REPL_HH__ */
diff --git a/src/mem/cache/tags/split.cc b/src/mem/cache/tags/split.cc
new file mode 100644 (file)
index 0000000..9d9036a
--- /dev/null
@@ -0,0 +1,478 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+/**
+ * @file
+ * Definitions of split cache tag store.
+ */
+
+#include <string>
+#include <iostream>
+#include <fstream>
+
+#include "base/cprintf.hh"
+#include "base/intmath.hh"
+#include "base/output.hh"
+#include "base/trace.hh"
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/tags/split.hh"
+#include "mem/cache/tags/split_lifo.hh"
+#include "mem/cache/tags/split_lru.hh"
+
+
+using namespace std;
+using namespace TheISA;
+
+// create and initialize a partitioned cache structure
+Split::Split(int _numSets, int _blkSize, int total_ways, int LRU1_assoc,
+             bool _lifo, bool _two_queue, int _hit_latency) :
+    numSets(_numSets), blkSize(_blkSize), lifo(_lifo), hitLatency(_hit_latency)
+{
+    DPRINTF(Split, "new split cache!!\n");
+
+    DPRINTF(Split, "lru has %d numSets, %d blkSize, %d assoc, and %d hit_latency\n",
+            numSets, blkSize, LRU1_assoc, hitLatency);
+
+    lru = new SplitLRU(_numSets, _blkSize, LRU1_assoc, _hit_latency, 1);
+
+    if (total_ways - LRU1_assoc == 0) {
+        lifo_net = NULL;
+        lru_net = NULL;
+    } else {
+        if (lifo) {
+            DPRINTF(Split, "Other partition is a LIFO with size %d in bytes. it gets %d ways\n",
+                    (total_ways - LRU1_assoc)*_numSets*_blkSize, (total_ways - LRU1_assoc));
+            lifo_net = new SplitLIFO(_blkSize, (total_ways - LRU1_assoc)*_numSets*_blkSize,
+                                     (total_ways - LRU1_assoc), _hit_latency, _two_queue, 2);
+            lru_net = NULL;
+        }
+        else {
+            DPRINTF(Split, "other LRU gets %d ways\n", total_ways - LRU1_assoc);
+            lru_net = new SplitLRU(_numSets, _blkSize, total_ways - LRU1_assoc, _hit_latency, 2);
+            lifo_net = NULL;
+        }
+    }
+
+    blkMask = blkSize - 1;
+
+    if (!isPowerOf2(total_ways))
+        warn("total cache ways/columns %d should be power of 2",
+             total_ways);
+
+    warmedUp = false;
+    /** @todo Make warmup percentage a parameter. */
+    warmupBound = numSets * total_ways;
+
+}
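+// An illustrative split (assumed numbers, not a configuration from the
+// tree): with total_ways = 8 and LRU1_assoc = 6, the CPU partition is a
+// 6-way SplitLRU over numSets sets and the remaining 2 ways form the NIC
+// partition, built as a SplitLIFO when lifo is set, otherwise as a second
+// SplitLRU.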
+
+Split::~Split()
+{
+    delete lru;
+    if (lifo)
+        delete lifo_net;
+    else
+        delete lru_net;
+}
+
+void
+Split::regStats(const string &name)
+{
+    using namespace Stats;
+
+    BaseTags::regStats(name);
+
+    usedEvictDist.init(0,3000,40);
+    unusedEvictDist.init(0,3000,40);
+    useByCPUCycleDist.init(0,35,1);
+
+    nic_repl
+        .name(name + ".nic_repl")
+        .desc("number of replacements in the nic partition")
+        .precision(0)
+        ;
+
+    cpu_repl
+        .name(name + ".cpu_repl")
+        .desc("number of replacements in the cpu partition")
+        .precision(0)
+        ;
+
+    lru->regStats(name + ".lru");
+
+    if (lifo && lifo_net) {
+        lifo_net->regStats(name + ".lifo_net");
+    } else if (lru_net) {
+        lru_net->regStats(name + ".lru_net");
+    }
+
+    nicUsedWhenEvicted
+        .name(name + ".nicUsedWhenEvicted")
+        .desc("number of NIC blks that were used before evicted")
+        ;
+
+    nicUsedTotLatency
+        .name(name + ".nicUsedTotLatency")
+        .desc("total cycles before eviction of used NIC blks")
+        ;
+
+    nicUsedTotEvicted
+        .name(name + ".nicUsedTotEvicted")
+        .desc("total number of used NIC blks evicted")
+        ;
+
+    nicUsedAvgLatency
+        .name(name + ".nicUsedAvgLatency")
+        .desc("avg number of cycles a used NIC blk is in cache")
+        .precision(0)
+        ;
+    nicUsedAvgLatency = nicUsedTotLatency / nicUsedTotEvicted;
+
+    usedEvictDist
+        .name(name + ".usedEvictDist")
+        .desc("distribution of used NIC blk eviction times")
+        .flags(pdf | cdf)
+        ;
+
+    nicUnusedWhenEvicted
+        .name(name + ".nicUnusedWhenEvicted")
+        .desc("number of NIC blks that were unused when evicted")
+        ;
+
+    nicUnusedTotLatency
+        .name(name + ".nicUnusedTotLatency")
+        .desc("total cycles before eviction of unused NIC blks")
+        ;
+
+    nicUnusedTotEvicted
+        .name(name + ".nicUnusedTotEvicted")
+        .desc("total number of unused NIC blks evicted")
+        ;
+
+    nicUnusedAvgLatency
+        .name(name + ".nicUnusedAvgLatency")
+        .desc("avg number of cycles an unused NIC blk is in cache")
+        .precision(0)
+        ;
+    nicUnusedAvgLatency = nicUnusedTotLatency / nicUnusedTotEvicted;
+
+    unusedEvictDist
+        .name(name + ".unusedEvictDist")
+        .desc("distribution of unused NIC blk eviction times")
+        .flags(pdf | cdf)
+        ;
+
+    nicUseByCPUCycleTotal
+        .name(name + ".nicUseByCPUCycleTotal")
+        .desc("total latency of NIC blks til usage time")
+        ;
+
+    nicBlksUsedByCPU
+        .name(name + ".nicBlksUsedByCPU")
+        .desc("total number of NIC blks used")
+        ;
+
+    nicAvgUsageByCPULatency
+        .name(name + ".nicAvgUsageByCPULatency")
+        .desc("average number of cycles before a NIC blk that is used gets used")
+        .precision(0)
+        ;
+    nicAvgUsageByCPULatency = nicUseByCPUCycleTotal / nicBlksUsedByCPU;
+
+    useByCPUCycleDist
+        .name(name + ".useByCPUCycleDist")
+        .desc("the distribution of cycle time in cache before NIC blk is used")
+        .flags(pdf | cdf)
+        ;
+
+    cpuUsedBlks
+        .name(name + ".cpuUsedBlks")
+        .desc("number of cpu blks that were used before evicted")
+        ;
+
+    cpuUnusedBlks
+        .name(name + ".cpuUnusedBlks")
+        .desc("number of cpu blks that were unused before evicted")
+        ;
+
+    nicAvgLatency
+        .name(name + ".nicAvgLatency")
+        .desc("avg number of cycles a NIC blk is in cache before evicted")
+        .precision(0)
+        ;
+    nicAvgLatency = (nicUnusedTotLatency + nicUsedTotLatency) /
+        (nicUnusedTotEvicted + nicUsedTotEvicted);
+
+    NR_CP_hits
+        .name(name + ".NR_CP_hits")
+        .desc("NIC requests hitting in CPU Partition")
+        ;
+
+    NR_NP_hits
+        .name(name + ".NR_NP_hits")
+        .desc("NIC requests hitting in NIC Partition")
+        ;
+
+    CR_CP_hits
+        .name(name + ".CR_CP_hits")
+        .desc("CPU requests hitting in CPU partition")
+        ;
+
+    CR_NP_hits
+        .name(name + ".CR_NP_hits")
+        .desc("CPU requests hitting in NIC partition")
+        ;
+
+}
+
+// probe cache for presence of given block.
+bool
+Split::probe(int asid, Addr addr) const
+{
+    bool success = lru->probe(asid, addr);
+    if (!success) {
+        if (lifo && lifo_net)
+            success = lifo_net->probe(asid, addr);
+        else if (lru_net)
+            success = lru_net->probe(asid, addr);
+    }
+
+    return success;
+}
+
+SplitBlk*
+Split::findBlock(Packet * &pkt, int &lat)
+{
+
+    Addr aligned = blkAlign(pkt->paddr);
+
+    if (memHash.count(aligned)) {
+        memHash[aligned]++;
+    } else if (pkt->nic_pkt) {
+        memHash[aligned] = 1;
+    }
+
+    SplitBlk *blk = lru->findBlock(pkt->paddr, pkt->req->asid, lat);
+    if (blk) {
+        if (pkt->nic_pkt) {
+            NR_CP_hits++;
+        } else {
+            CR_CP_hits++;
+        }
+    } else {
+        if (lifo && lifo_net) {
+            blk = lifo_net->findBlock(pkt->paddr, pkt->req->asid, lat);
+
+        } else if (lru_net) {
+            blk = lru_net->findBlock(pkt->paddr, pkt->req->asid, lat);
+        }
+        if (blk) {
+            if (pkt->nic_pkt) {
+                NR_NP_hits++;
+            } else {
+                CR_NP_hits++;
+            }
+        }
+    }
+
+    if (blk) {
+        Tick latency = curTick - blk->ts;
+        if (blk->isNIC) {
+            if (!blk->isUsed && !pkt->nic_pkt) {
+                useByCPUCycleDist.sample(latency);
+                nicUseByCPUCycleTotal += latency;
+                nicBlksUsedByCPU++;
+            }
+        }
+        blk->isUsed = true;
+
+        if (pkt->nic_pkt) {
+            DPRINTF(Split, "found block in partition %d\n", blk->part);
+        }
+    }
+    return blk;
+}
+
+SplitBlk*
+Split::findBlock(Addr addr, int asid, int &lat)
+{
+    SplitBlk *blk = lru->findBlock(addr, asid, lat);
+    if (!blk) {
+        if (lifo && lifo_net) {
+            blk = lifo_net->findBlock(addr, asid, lat);
+        } else if (lru_net) {
+            blk = lru_net->findBlock(addr, asid, lat);
+        }
+    }
+
+    return blk;
+}
+
+SplitBlk*
+Split::findBlock(Addr addr, int asid) const
+{
+    SplitBlk *blk = lru->findBlock(addr, asid);
+    if (!blk) {
+        if (lifo && lifo_net) {
+            blk = lifo_net->findBlock(addr, asid);
+        } else if (lru_net) {
+            blk = lru_net->findBlock(addr, asid);
+        }
+    }
+
+    return blk;
+}
+
+SplitBlk*
+Split::findReplacement(Packet * &pkt, PacketList* &writebacks,
+                     BlkList &compress_blocks)
+{
+    SplitBlk *blk;
+
+    if (pkt->nic_pkt) {
+        DPRINTF(Split, "finding a replacement for nic_req\n");
+        nic_repl++;
+        if (lifo && lifo_net)
+            blk = lifo_net->findReplacement(pkt, writebacks,
+                                             compress_blocks);
+        else if (lru_net)
+            blk = lru_net->findReplacement(pkt, writebacks,
+                                            compress_blocks);
+        // otherwise this is an unpartitioned, LRU-only cache
+        else
+            blk = lru->findReplacement(pkt, writebacks, compress_blocks);
+    } else {
+        DPRINTF(Split, "finding replacement for cpu_req\n");
+        blk = lru->findReplacement(pkt, writebacks,
+                                    compress_blocks);
+        cpu_repl++;
+    }
+
+    Tick latency = curTick - blk->ts;
+    if (blk->isNIC) {
+        if (blk->isUsed) {
+            nicUsedWhenEvicted++;
+            usedEvictDist.sample(latency);
+            nicUsedTotLatency += latency;
+            nicUsedTotEvicted++;
+        } else {
+            nicUnusedWhenEvicted++;
+            unusedEvictDist.sample(latency);
+            nicUnusedTotLatency += latency;
+            nicUnusedTotEvicted++;
+        }
+    } else {
+        if (blk->isUsed) {
+            cpuUsedBlks++;
+        } else {
+            cpuUnusedBlks++;
+        }
+    }
+
+    // blk attributes for the new blk coming IN
+    blk->ts = curTick;
+    blk->isNIC = (pkt->nic_pkt) ? true : false;
+
+    return blk;
+}
+
+void
+Split::invalidateBlk(int asid, Addr addr)
+{
+    SplitBlk *blk = lru->findBlock(addr, asid);
+    if (!blk) {
+        if (lifo && lifo_net)
+            blk = lifo_net->findBlock(addr, asid);
+        else if (lru_net)
+            blk = lru_net->findBlock(addr, asid);
+
+        if (!blk)
+            return;
+    }
+
+    blk->status = 0;
+    blk->isTouched = false;
+    tagsInUse--;
+}
+
+void
+Split::doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks)
+{
+    if (lru->probe(asid, source))
+        lru->doCopy(source, dest, asid, writebacks);
+    else {
+        if (lifo && lifo_net)
+            lifo_net->doCopy(source, dest, asid, writebacks);
+        else if (lru_net)
+            lru_net->doCopy(source, dest, asid, writebacks);
+    }
+}
+
+void
+Split::cleanupRefs()
+{
+    lru->cleanupRefs();
+    if (lifo && lifo_net)
+        lifo_net->cleanupRefs();
+    else if (lru_net)
+        lru_net->cleanupRefs();
+
+    ofstream memPrint(simout.resolve("memory_footprint.txt").c_str(),
+                      ios::trunc);
+
+    // This doesn't really belong here, but cleanupRefs runs at the end of
+    // simulation, which is when the footprint should be dumped.
+    memIter end = memHash.end();
+    for (memIter iter = memHash.begin(); iter != end; ++iter) {
+        ccprintf(memPrint, "%8x\t%d\n", (*iter).first, (*iter).second);
+    }
+}
+
+Addr
+Split::regenerateBlkAddr(Addr tag, int set) const
+{
+    if (lifo_net)
+        return lifo_net->regenerateBlkAddr(tag, set);
+    else
+        return lru->regenerateBlkAddr(tag, set);
+}
+
+Addr
+Split::extractTag(Addr addr, SplitBlk *blk) const
+{
+    if (blk->part == 2) {
+        if (lifo_net)
+            return lifo_net->extractTag(addr);
+        else if (lru_net)
+            return lru_net->extractTag(addr);
+        else
+            panic("this shouldn't happen");
+    } else
+        return lru->extractTag(addr);
+}
+
diff --git a/src/mem/cache/tags/split.hh b/src/mem/cache/tags/split.hh
new file mode 100644 (file)
index 0000000..6f24415
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+/**
+ * @file
+ * Declaration of a split/partitioned tag store.
+ */
+
+#ifndef __SPLIT_HH__
+#define __SPLIT_HH__
+
+#include <list>
+
+#include "mem/cache/cache_blk.hh" // base class
+#include "mem/cache/tags/split_blk.hh"
+#include "mem/packet.hh" // for inlined functions
+#include <assert.h>
+#include "mem/cache/tags/base_tags.hh"
+#include "base/hashmap.hh"
+
+class BaseCache;
+class SplitLRU;
+class SplitLIFO;
+
+/**
+ * A split, partitioned cache tag store.
+ */
+class Split : public BaseTags
+{
+  public:
+    /** Typedef the block type used in this tag store. */
+    typedef SplitBlk BlkType;
+    /** Typedef for a list of pointers to the local block class. */
+    typedef std::list<SplitBlk*> BlkList;
+  protected:
+    /** The number of sets in the cache. */
+    const int numSets;
+    /** The number of bytes in a block. */
+    const int blkSize;
+    /** Whether the 2nd partition (for the NIC) is LIFO or not */
+    const bool lifo;
+    /** The hit latency. */
+    const int hitLatency;
+
+    Addr blkMask;
+
+    /** Number of NIC requests that hit in the NIC partition */
+    Stats::Scalar<> NR_NP_hits;
+    /** Number of NIC requests that hit in the CPU partition */
+    Stats::Scalar<> NR_CP_hits;
+    /** Number of CPU requests that hit in the NIC partition */
+    Stats::Scalar<> CR_NP_hits;
+    /** Number of CPU requests that hit in the CPU partition */
+    Stats::Scalar<> CR_CP_hits;
+    /** The number of nic replacements (i.e. misses) */
+    Stats::Scalar<> nic_repl;
+    /** The number of cpu replacements (i.e. misses) */
+    Stats::Scalar<> cpu_repl;
+
+    //For latency studies
+    /** the number of NIC blks that were used before evicted */
+    Stats::Scalar<> nicUsedWhenEvicted;
+    /** the total latency of used NIC blocks in the cache */
+    Stats::Scalar<> nicUsedTotLatency;
+    /** the total number of used NIC blocks evicted */
+    Stats::Scalar<> nicUsedTotEvicted;
+    /** the average number of cycles a used NIC blk is in the cache */
+    Stats::Formula nicUsedAvgLatency;
+    /** the Distribution of used NIC blk eviction times */
+    Stats::Distribution<> usedEvictDist;
+
+    /** the number of NIC blks that were unused before evicted */
+    Stats::Scalar<> nicUnusedWhenEvicted;
+    /** the total latency of unused NIC blks in the cache */
+    Stats::Scalar<> nicUnusedTotLatency;
+    /** the total number of unused NIC blocks evicted */
+    Stats::Scalar<> nicUnusedTotEvicted;
+    /** the average number of cycles an unused NIC blk is in the cache */
+    Stats::Formula nicUnusedAvgLatency;
+    /** the Distribution of unused NIC blk eviction times */
+    Stats::Distribution<> unusedEvictDist;
+
+    /** The total latency of NIC blocks to 1st usage time by CPU */
+    Stats::Scalar<> nicUseByCPUCycleTotal;
+    /** The total number of NIC blocks used */
+    Stats::Scalar<> nicBlksUsedByCPU;
+    /** the average number of cycles before a used NIC blk is first used by the CPU */
+    Stats::Formula nicAvgUsageByCPULatency;
+    /** the distribution of cycles a NIC blk spends in the cache before the CPU uses it */
+    Stats::Distribution<> useByCPUCycleDist;
+
+    /** the number of CPU blks that were used before evicted */
+    Stats::Scalar<> cpuUsedBlks;
+    /** the number of CPU blks that were unused before evicted */
+    Stats::Scalar<> cpuUnusedBlks;
+
+    /** the avg number of cycles before a NIC blk is evicted */
+    Stats::Formula nicAvgLatency;
+
+    typedef m5::hash_map<Addr, int, m5::hash<Addr> > hash_t;
+    typedef hash_t::const_iterator memIter;
+    hash_t memHash;
+
+
+  private:
+    SplitLRU *lru;
+    SplitLRU *lru_net;
+    SplitLIFO *lifo_net;
+
+  public:
+    /**
+     * Construct and initialize this tag store.
+     * @param _numSets The number of sets in the cache.
+     * @param _blkSize The number of bytes in a block.
+     * @param total_ways The total associativity across both partitions.
+     * @param LRU1_assoc The number of ways in the primary (CPU) LRU partition.
+     * @param _lifo Whether the second (NIC) partition is managed LIFO.
+     * @param _two_queue Whether the LIFO partition uses its two-queue variant.
+     * @param _hit_latency The latency in cycles for a hit.
+     */
+    Split(int _numSets, int _blkSize, int total_ways, int LRU1_assoc,
+          bool _lifo, bool _two_queue, int _hit_latency);
+
+    /**
+     * Destructor
+     */
+    virtual ~Split();
+
+    /**
+     * Register the stats for this object
+     * @param name The name to prepend to the stats name.
+     */
+    void regStats(const std::string &name);
+
+    /**
+     * Return the block size.
+     * @return the block size.
+     */
+    int getBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Return the subblock size. In the case of Split it is always the block
+     * size.
+     * @return The block size.
+     */
+    int getSubBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Search for the address in the cache.
+     * @param asid The address space ID.
+     * @param addr The address to find.
+     * @return True if the address is in the cache.
+     */
+    bool probe(int asid, Addr addr) const;
+
+    /**
+     * Invalidate the block containing the given address.
+     * @param asid The address space ID.
+     * @param addr The address to invalidate.
+     */
+    void invalidateBlk(int asid, Addr addr);
+
+    /**
+     * Finds the given address in the cache and updates the replacement data.
+     * Returns the access latency as a side effect.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @param lat The access latency.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Addr addr, int asid, int &lat);
+
+    /**
+     * Finds the given address in the cache and updates the replacement data.
+     * Returns the access latency as a side effect.
+     * @param pkt The packet whose block to find.
+     * @param lat The access latency.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Packet * &pkt, int &lat);
+
+    /**
+     * Finds the given address in the cache without updating the replacement data.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Addr addr, int asid) const;
+
+    /**
+     * Find a replacement block for the address provided.
+     * @param pkt The packet to find a replacement candidate for.
+     * @param writebacks List for any writebacks to be performed.
+     * @param compress_blocks List of blocks to compress, for adaptive comp.
+     * @return The block to place the replacement in.
+     */
+    SplitBlk* findReplacement(Packet * &pkt, PacketList* &writebacks,
+                            BlkList &compress_blocks);
+
+
+    /**
+     * Generate the tag from the given address.
+     * @param addr The address to get the tag from.
+     * @param blk The block, used to determine which partition it is in.
+     * @return The tag of the address.
+     */
+    Addr extractTag(Addr addr, SplitBlk *blk) const;
+
+    /**
+     * Calculate the set index from the address.
+     * @param addr The address to get the set from.
+     * @return The set index of the address.
+     */
+    int extractSet(Addr addr) const
+    {
+        panic("should never call this!\n");
+    }
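+    // The set index depends on which partition a block maps into, so the
+    // split store leaves set extraction to the per-partition tag stores.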
+
+    /**
+     * Get the block offset from an address.
+     * @param addr The address to get the offset of.
+     * @return The block offset.
+     */
+    int extractBlkOffset(Addr addr) const
+    {
+        return (addr & blkMask);
+    }
+
+    /**
+     * Align an address to the block size.
+     * @param addr the address to align.
+     * @return The block address.
+     */
+    Addr blkAlign(Addr addr) const
+    {
+        return (addr & ~(Addr) (blkMask));
+    }
+
+    /**
+     * Regenerate the block address from the tag.
+     * @param tag The tag of the block.
+     * @param set The set of the block.
+     * @return The block address.
+     */
+    Addr regenerateBlkAddr(Addr tag, int set) const;
+
+    /**
+     * Return the hit latency.
+     * @return the hit latency.
+     */
+    int getHitLatency() const
+    {
+        return hitLatency;
+    }
+
+    /**
+     * Read the data out of the internal storage of the given cache block.
+     * @param blk The cache block to read.
+     * @param data The buffer to read the data into.
+     */
+    void readData(SplitBlk *blk, uint8_t *data)
+    {
+        memcpy(data, blk->data, blk->size);
+    }
+
+    /**
+     * Write data into the internal storage of the given cache block. Since
+     * the Split store does not store data specially, this just needs to update
+     * the size.
+     * @param blk The cache block to write.
+     * @param data The data to write.
+     * @param size The number of bytes to write.
+     * @param writebacks A list for any writebacks to be performed. May be
+     * needed when writing to a compressed block.
+     */
+    void writeData(SplitBlk *blk, uint8_t *data, int size,
+                   PacketList* & writebacks)
+    {
+        assert(size <= blkSize);
+        blk->size = size;
+    }
+
+    /**
+     * Perform a block aligned copy from the source address to the destination.
+     * @param source The block-aligned source address.
+     * @param dest The block-aligned destination address.
+     * @param asid The address space ID.
+     * @param writebacks List for any generated writeback requests.
+     */
+    void doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks);
+
+    /**
+     * No impl.
+     */
+    void fixCopy(Packet * &pkt, PacketList* &writebacks)
+    {
+    }
+
+    /**
+     * Called at end of simulation to complete average block reference stats.
+     */
+    virtual void cleanupRefs();
+};
+
+#endif
diff --git a/src/mem/cache/tags/split_blk.hh b/src/mem/cache/tags/split_blk.hh
new file mode 100644 (file)
index 0000000..f385161
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+/**
+ * @file
+ * Declaration of partitioned tag store cache block class.
+ */
+
+#ifndef __SPLIT_BLK_HH__
+#define __SPLIT_BLK_HH__
+
+#include "mem/cache/cache_blk.hh" // base class
+
+/**
+ * Split cache block.
+ */
+class SplitBlk : public CacheBlk {
+  public:
+    /** Has this block been touched? Used to aid calculation of warmup time. */
+    bool isTouched;
+    /** Has this block been used after being brought in? (for LIFO partition) */
+    bool isUsed;
+    /** is this blk a NIC block? (i.e. requested by the NIC) */
+    bool isNIC;
+    /** timestamp of the arrival of this block into the cache */
+    Tick ts;
+    /** the previous block in the LIFO partition (brought in before me) */
+    SplitBlk *prev;
+    /** the next block in the LIFO partition (brought in later than me) */
+    SplitBlk *next;
+    /** which partition this block is in */
+    int part;
+
+    SplitBlk()
+        : isTouched(false), isUsed(false), isNIC(false), ts(0), prev(NULL), next(NULL),
+          part(0)
+    {}
+};
+
+#endif
+
diff --git a/src/mem/cache/tags/split_lifo.cc b/src/mem/cache/tags/split_lifo.cc
new file mode 100644 (file)
index 0000000..f2c37c8
--- /dev/null
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+/**
+ * @file
+ * Definitions of a LIFO tag store usable in a partitioned cache.
+ */
+
+#include <string>
+
+#include "mem/cache/base_cache.hh"
+#include "base/intmath.hh"
+#include "mem/cache/tags/split_lifo.hh"
+#include "sim/root.hh"
+#include "base/trace.hh"
+
+using namespace std;
+
+SplitBlk*
+LIFOSet::findBlk(int asid, Addr tag) const
+{
+    for (SplitBlk *blk = firstIn; blk != NULL; blk = blk->next) {
+        if (blk->tag == tag && blk->isValid()) {
+            return blk;
+        }
+    }
+    return NULL;
+}
+
+void
+LIFOSet::moveToLastIn(SplitBlk *blk)
+{
+    if (blk == lastIn)
+        return;
+
+    if (blk == firstIn) {
+        blk->next->prev = NULL;
+        // keep the set's firstIn pointer valid when the head moves to the tail
+        firstIn = blk->next;
+    } else {
+        blk->prev->next = blk->next;
+        blk->next->prev = blk->prev;
+    }
+    blk->next = NULL;
+    blk->prev = lastIn;
+    lastIn->next = blk;
+
+    lastIn = blk;
+}
+
+void
+LIFOSet::moveToFirstIn(SplitBlk *blk)
+{
+    if (blk == firstIn)
+        return;
+
+    if (blk == lastIn) {
+        blk->prev->next = NULL;
+        lastIn = blk->prev;
+    } else {
+        blk->next->prev = blk->prev;
+        blk->prev->next = blk->next;
+    }
+
+    blk->prev = NULL;
+    blk->next = firstIn;
+    firstIn->prev = blk;
+
+    firstIn = blk;
+}
+
+// create and initialize a LIFO cache structure
+SplitLIFO::SplitLIFO(int _blkSize, int _size, int _ways, int _hit_latency, bool two_Queue, int _part) :
+    blkSize(_blkSize), size(_size), numBlks(_size/_blkSize), numSets((_size/_ways)/_blkSize), ways(_ways),
+    hitLatency(_hit_latency), twoQueue(two_Queue), part(_part)
+{
+    if (!isPowerOf2(blkSize))
+        fatal("cache block size (in bytes) must be a power of 2");
+    if (hitLatency <= 0)
+        fatal("access latency in cycles must be at least one cycle");
+    if (_ways == 0)
+        fatal("a SplitLIFO must be instantiated with a non-zero number of ways");
+
+
+    SplitBlk  *blk;
+    int i, j, blkIndex;
+
+    setShift = floorLog2(blkSize);
+    blkMask = blkSize - 1;
+    setMask = numSets - 1;
+    tagShift = setShift + floorLog2(numSets);
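+
+    // Worked example with hypothetical parameters: _size = 16384,
+    // _blkSize = 64 and _ways = 4 give numBlks = 256, numSets = 64,
+    // setShift = 6, blkMask = 0x3f, setMask = 0x3f and tagShift = 12.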
+
+    warmedUp = false;
+    /** @todo Make warmup percentage a parameter. */
+    warmupBound = size/blkSize;
+
+    // allocate data blocks
+    blks = new SplitBlk[numBlks];
+    sets = new LIFOSet[numSets];
+    dataBlks = new uint8_t[size];
+
+    blkIndex = 0;
+    for (i=0; i < numSets; ++i) {
+        sets[i].ways = ways;
+        sets[i].lastIn = &blks[blkIndex];
+        sets[i].firstIn = &blks[blkIndex + ways - 1];
+
+        /* Three cases: 1 way, 2 ways, or 3+ ways.  With a single way,
+           lastIn and firstIn point to the same block, and its next and
+           prev pointers must be assigned specially; with more ways the
+           first, middle, and last blocks are linked as below.
+        */
+        /* deal with the first way */
+        blk = &blks[blkIndex];
+        blk->prev = &blks[blkIndex + 1];
+        blk->next = NULL;
+        blk->data = &dataBlks[blkSize*blkIndex];
+        blk->size = blkSize;
+        blk->part = part;
+        blk->set = i;
+        ++blkIndex;
+
+        /* if there are "middle" ways, do them here */
+        if (ways > 2) {
+            for (j=1; j < ways-1; ++j) {
+                blk = &blks[blkIndex];
+                blk->data = &dataBlks[blkSize*blkIndex];
+                blk->prev = &blks[blkIndex+1];
+                blk->next = &blks[blkIndex-1];
+                blk->size = blkSize;
+                blk->part = part;
+                blk->set = i;
+                ++blkIndex;
+            }
+        }
+
+        /* do the final way here, depending on whether the final way is the only
+           way or not
+        */
+        if (ways > 1) {
+            blk =  &blks[blkIndex];
+            blk->prev = NULL;
+            blk->next = &blks[blkIndex - 1];
+            blk->data = &dataBlks[blkSize*blkIndex];
+            blk->size = blkSize;
+            blk->part = part;
+            blk->set = i;
+            ++blkIndex;
+        } else {
+            blk->prev = NULL;
+        }
+    }
+    assert(blkIndex == numBlks);
+}
+
+SplitLIFO::~SplitLIFO()
+{
+    delete [] blks;
+    delete [] sets;
+    delete [] dataBlks;
+}
+
+void
+SplitLIFO::regStats(const std::string &name)
+{
+    BaseTags::regStats(name);
+
+    hits
+        .name(name + ".hits")
+        .desc("number of hits on this partition")
+        .precision(0)
+        ;
+
+    misses
+        .name(name + ".misses")
+        .desc("number of misses in this partition")
+        .precision(0)
+        ;
+
+    invalidations
+        .name(name + ".invalidations")
+        .desc("number of invalidations in this partition")
+        .precision(0)
+        ;
+}
+
+// probe cache for presence of given block.
+bool
+SplitLIFO::probe(int asid, Addr addr) const
+{
+    Addr tag = extractTag(addr);
+    unsigned myset = extractSet(addr);
+
+    SplitBlk* blk = sets[myset].findBlk(asid, tag);
+    return (blk != NULL);
+}
+
+SplitBlk*
+SplitLIFO::findBlock(Addr addr, int asid, int &lat)
+{
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    SplitBlk *blk = sets[set].findBlk(asid, tag);
+
+    lat = hitLatency;
+
+    if (blk) {
+        DPRINTF(Split, "Found LIFO blk %#x in set %d, with tag %#x\n",
+                addr, set, tag);
+        hits++;
+
+        if (blk->whenReady > curTick && blk->whenReady - curTick > hitLatency)
+            lat = blk->whenReady - curTick;
+        blk->refCount += 1;
+
+        if (twoQueue) {
+            blk->isUsed = true;
+            sets[set].moveToFirstIn(blk);
+        } else {
+            sets[set].moveToLastIn(blk);
+        }
+    }
+
+    return blk;
+}
+
+SplitBlk*
+SplitLIFO::findBlock(Packet * &pkt, int &lat)
+{
+    Addr addr = pkt->paddr;
+    int asid = pkt->req->asid;
+
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    SplitBlk *blk = sets[set].findBlk(asid, tag);
+
+    lat = hitLatency;
+
+    if (blk) {
+        DPRINTF(Split, "Found LIFO blk %#x in set %d, with tag %#x\n",
+                addr, set, tag);
+        hits++;
+
+        if (blk->whenReady > curTick && blk->whenReady - curTick > hitLatency)
+            lat = blk->whenReady - curTick;
+        blk->refCount += 1;
+
+        if (twoQueue) {
+            blk->isUsed = true;
+            sets[set].moveToFirstIn(blk);
+        } else {
+            sets[set].moveToLastIn(blk);
+        }
+    }
+
+    return blk;
+}
+
+SplitBlk*
+SplitLIFO::findBlock(Addr addr, int asid) const
+{
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    SplitBlk *blk = sets[set].findBlk(asid, tag);
+
+    return blk;
+}
+
+SplitBlk*
+SplitLIFO::findReplacement(Packet * &pkt, PacketList* &writebacks,
+                           BlkList &compress_blocks)
+{
+    unsigned set = extractSet(pkt->paddr);
+
+    SplitBlk *firstIn = sets[set].firstIn;
+    SplitBlk *lastIn = sets[set].lastIn;
+
+    SplitBlk *blk;
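+    // Victim selection: in two-queue mode a used firstIn block is recycled
+    // first; while the set is still filling, compulsory misses allocate
+    // slots from firstIn toward lastIn; once the set is full, lastIn (the
+    // newest block) is the victim, which is what makes the policy LIFO.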
+    if (twoQueue && firstIn->isUsed) {
+        blk = firstIn;
+        blk->isUsed = false;
+        sets[set].moveToLastIn(blk);
+    } else {
+        int &withValue = sets[set].withValue;
+        if (withValue == ways) {
+            blk = lastIn;
+        } else {
+            // fill compulsory-miss slots from firstIn down toward lastIn
+            // (assumes the set's blocks are still contiguous from lastIn,
+            // as laid out by the constructor)
+            blk = &(sets[set].lastIn[ways - ++withValue]);
+        }
+    }
+
+    DPRINTF(Split, "just assigned %#x addr into LIFO, replacing %#x status %#x\n",
+            pkt->paddr, regenerateBlkAddr(blk->tag, set), blk->status);
+    if (blk->isValid()) {
+        int thread_num = (blk->xc) ? blk->xc->getThreadNum() : 0;
+        replacements[thread_num]++;
+        totalRefs += blk->refCount;
+        ++sampledRefs;
+        blk->refCount = 0;
+    } else {
+        tagsInUse++;
+        blk->isTouched = true;
+        if (!warmedUp && tagsInUse.value() >= warmupBound) {
+            warmedUp = true;
+            warmupCycle = curTick;
+        }
+    }
+
+    misses++;
+
+    return blk;
+}
+
+void
+SplitLIFO::invalidateBlk(int asid, Addr addr)
+{
+    SplitBlk *blk = findBlock(addr, asid);
+    if (blk) {
+        blk->status = 0;
+        blk->isTouched = false;
+        tagsInUse--;
+        invalidations++;
+    }
+}
+
+void
+SplitLIFO::doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks)
+{
+    assert(source == blkAlign(source));
+    assert(dest == blkAlign(dest));
+    SplitBlk *source_blk = findBlock(source, asid);
+    assert(source_blk);
+    SplitBlk *dest_blk = findBlock(dest, asid);
+    if (dest_blk == NULL) {
+        // Need to do a replacement
+        Packet * pkt = new Packet();
+        pkt->paddr = dest;
+        BlkList dummy_list;
+        dest_blk = findReplacement(pkt, writebacks, dummy_list);
+        if (dest_blk->isValid() && dest_blk->isModified()) {
+            // Need to writeback data.
+            pkt = buildWritebackReq(regenerateBlkAddr(dest_blk->tag,
+                                                      dest_blk->set),
+                                    dest_blk->req->asid,
+                                    dest_blk->xc,
+                                    blkSize,
+                                    (cache->doData())?dest_blk->data:0,
+                                    dest_blk->size);
+            writebacks.push_back(pkt);
+        }
+        dest_blk->tag = extractTag(dest);
+        dest_blk->req->asid = asid;
+        /**
+         * @todo Do we need to pass in the execution context, or can we
+         * assume it's the same?
+         */
+        assert(source_blk->xc);
+        dest_blk->xc = source_blk->xc;
+    }
+    /**
+     * @todo Can't assume the status once we have coherence on copies.
+     */
+
+    // Set this block as readable, writeable, and dirty.
+    dest_blk->status = 7;
+    if (cache->doData()) {
+        memcpy(dest_blk->data, source_blk->data, blkSize);
+    }
+}
+
+void
+SplitLIFO::cleanupRefs()
+{
+    for (int i = 0; i < numBlks; ++i) {
+        if (blks[i].isValid()) {
+            totalRefs += blks[i].refCount;
+            ++sampledRefs;
+        }
+    }
+}
diff --git a/src/mem/cache/tags/split_lifo.hh b/src/mem/cache/tags/split_lifo.hh
new file mode 100644 (file)
index 0000000..c50eaa5
--- /dev/null
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+/**
+ * @file
+ * Declaration of a LIFO tag store usable in a partitioned cache.
+ */
+
+#ifndef __SPLIT_LIFO_HH__
+#define __SPLIT_LIFO_HH__
+
+#include <list>
+
+#include "mem/cache/cache_blk.hh" // base class
+#include "mem/cache/tags/split_blk.hh"
+#include "mem/packet.hh" // for inlined functions
+#include "base/hashmap.hh"
+#include <assert.h>
+#include "mem/cache/tags/base_tags.hh"
+
+class BaseCache;
+
+/**
+ * A LIFO set of cache blocks.
+ */
+class LIFOSet {
+  public:
+    /** the number of blocks in this set */
+    int ways;
+
+    /** Cache blocks in this set, maintained as a doubly linked list
+        from firstIn (oldest) to lastIn (newest). */
+    SplitBlk *lastIn;
+    SplitBlk *firstIn;
+
+    /** The number of blocks in this set that have taken their first
+     * (compulsory) miss.  The initial "filling" of the set is finished
+     * once withValue == ways.
+     */
+    int withValue;
+
+    /**
+     * Find a block matching the tag in this set.
+     * @param asid The address space ID.
+     * @param tag the Tag you are looking for
+     * @return Pointer to the block, if found, NULL otherwise
+     */
+    SplitBlk* findBlk(int asid, Addr tag) const;
+
+    void moveToLastIn(SplitBlk *blk);
+    void moveToFirstIn(SplitBlk *blk);
+
+    LIFOSet()
+        : ways(-1), lastIn(NULL), firstIn(NULL), withValue(0)
+    {}
+};
+
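+// Replacement interplay (see SplitLIFO below): in two-queue mode a hit
+// marks the block used and moves it to firstIn, so findReplacement can
+// recycle used blocks ahead of the plain LIFO victim; in plain mode a
+// hit simply moves the block to lastIn.
+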
+/**
+ * A LIFO cache tag store.
+ */
+class SplitLIFO : public BaseTags
+{
+  public:
+    /** Typedef the block type used in this tag store. */
+    typedef SplitBlk BlkType;
+    /** Typedef for a list of pointers to the local block class. */
+    typedef std::list<SplitBlk*> BlkList;
+  protected:
+    /** The number of bytes in a block. */
+    const int blkSize;
+    /** the size of the cache in bytes */
+    const int size;
+    /** the number of blocks in the cache */
+    const int numBlks;
+    /** the number of sets in the cache */
+    const int numSets;
+    /** the number of ways in the cache */
+    const int ways;
+    /** The hit latency. */
+    const int hitLatency;
+    /** whether this is a "2 queue" replacement @sa moveToLastIn @sa moveToFirstIn */
+    const bool twoQueue;
+    /** indicator for which partition this is */
+    const int part;
+
+    /** The cache blocks. */
+    SplitBlk *blks;
+    /** The Cache sets */
+    LIFOSet *sets;
+    /** The data blocks, 1 per cache block. */
+    uint8_t *dataBlks;
+
+    /** The amount to shift the address to get the set. */
+    int setShift;
+    /** The amount to shift the address to get the tag. */
+    int tagShift;
+    /** Mask out all bits that aren't part of the set index. */
+    unsigned setMask;
+    /** Mask out all bits that aren't part of the block offset. */
+    unsigned blkMask;
+
+
+    /** the number of hits in this partition */
+    Stats::Scalar<> hits;
+    /** the number of blocks brought into this partition (i.e. misses) */
+    Stats::Scalar<> misses;
+    /** the number of invalidations in this partition */
+    Stats::Scalar<> invalidations;
+
+  public:
+    /**
+     * Construct and initialize this tag store.
+     * @param _blkSize The number of bytes in a block.
+     * @param _size The size of this partition in bytes.
+     * @param _ways The number of ways in the cache.
+     * @param _hit_latency The latency in cycles for a hit.
+     * @param two_Queue Whether to use two-queue replacement.
+     * @param _part The partition ID of this tag store.
+     */
+    SplitLIFO(int _blkSize, int _size, int _ways, int _hit_latency, bool two_Queue, int _part);
+
+    /**
+     * Destructor
+     */
+    virtual ~SplitLIFO();
+
+    /**
+     * Register the statistics for this object
+     * @param name The name to precede the stat
+     */
+    void regStats(const std::string &name);
+
+    /**
+     * Return the block size.
+     * @return the block size.
+     */
+    int getBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Return the subblock size. In the case of LIFO it is always the block
+     * size.
+     * @return The block size.
+     */
+    int getSubBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Search for the address in the cache.
+     * @param asid The address space ID.
+     * @param addr The address to find.
+     * @return True if the address is in the cache.
+     */
+    bool probe(int asid, Addr addr) const;
+
+    /**
+     * Invalidate the block containing the given address.
+     * @param asid The address space ID.
+     * @param addr The address to invalidate.
+     */
+    void invalidateBlk(int asid, Addr addr);
+
+    /**
+     * Finds the given address in the cache and update replacement data.
+     * Returns the access latency as a side effect.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @param lat The access latency.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Addr addr, int asid, int &lat);
+
+    /**
+     * Finds the given address in the cache and update replacement data.
+     * Returns the access latency as a side effect.
+     * @param pkt The packet whose block to find.
+     * @param lat The access latency.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Packet * &pkt, int &lat);
+
+    /**
+     * Finds the given address in the cache, do not update replacement data.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Addr addr, int asid) const;
+
+    /**
+     * Find a replacement block for the address provided.
+     * @param pkt The packet to find a replacement candidate for.
+     * @param writebacks List for any writebacks to be performed.
+     * @param compress_blocks List of blocks to compress, for adaptive comp.
+     * @return The block to place the replacement in.
+     */
+    SplitBlk* findReplacement(Packet * &pkt, PacketList* &writebacks,
+                            BlkList &compress_blocks);
+
+    /**
+     * Generate the tag from the given address.
+     * @param addr The address to get the tag from.
+     * @return The tag of the address.
+     */
+    Addr extractTag(Addr addr) const
+    {
+        return (addr >> tagShift);
+    }
+
+    /**
+     * Generate the tag from the given address.
+     * @param addr The address to get the tag from.
+     * @param blk Ignored
+     * @return The tag of the address.
+     */
+    Addr extractTag(Addr addr, SplitBlk *blk) const
+    {
+        return (addr >> tagShift);
+    }
+
+    /**
+     * Calculate the set index from the address.
+     * @param addr The address to get the set from.
+     * @return The set index of the address.
+     */
+    int extractSet(Addr addr) const
+    {
+        return ((addr >> setShift) & setMask);
+    }
+
+    /**
+     * Get the block offset from an address.
+     * @param addr The address to get the offset of.
+     * @return The block offset.
+     */
+    int extractBlkOffset(Addr addr) const
+    {
+        return (addr & blkMask);
+    }
+
+    /**
+     * Align an address to the block size.
+     * @param addr the address to align.
+     * @return The block address.
+     */
+    Addr blkAlign(Addr addr) const
+    {
+        return (addr & ~(Addr)blkMask);
+    }
+
+    /**
+     * Regenerate the block address from the tag.
+     * @param tag The tag of the block.
+     * @param set The set of the block.
+     * @return The block address.
+     */
+    Addr regenerateBlkAddr(Addr tag, unsigned set) const
+    {
+        return ((tag << tagShift) | ((Addr)set << setShift));
+    }
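+
+    // Round-trip sketch (hypothetical blkSize = 64 and numSets = 64, so
+    // setShift = 6, setMask = 0x3f, tagShift = 12): for addr 0x12345680,
+    // extractTag() = 0x12345, extractSet() = 0x1a, and
+    // regenerateBlkAddr(0x12345, 0x1a) = 0x12345680 = blkAlign(addr).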
+
+    /**
+     * Return the hit latency.
+     * @return the hit latency.
+     */
+    int getHitLatency() const
+    {
+        return hitLatency;
+    }
+
+    /**
+     * Read the data out of the internal storage of the given cache block.
+     * @param blk The cache block to read.
+     * @param data The buffer to read the data into.
+     * @return The cache block's data.
+     */
+    void readData(SplitBlk *blk, uint8_t *data)
+    {
+        memcpy(data, blk->data, blk->size);
+    }
+
+    /**
+     * Write data into the internal storage of the given cache block. Since
+     * LIFO does not store data differently, this just needs to update the size.
+     * @param blk The cache block to write.
+     * @param data The data to write.
+     * @param size The number of bytes to write.
+     * @param writebacks A list for any writebacks to be performed. May be
+     * needed when writing to a compressed block.
+     */
+    void writeData(SplitBlk *blk, uint8_t *data, int size,
+                   PacketList* & writebacks)
+    {
+        assert(size <= blkSize);
+        blk->size = size;
+    }
+
+    /**
+     * Perform a block aligned copy from the source address to the destination.
+     * @param source The block-aligned source address.
+     * @param dest The block-aligned destination address.
+     * @param asid The address space ID.
+     * @param writebacks List for any generated writeback requests.
+     */
+    void doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks);
+
+    /**
+     * No impl.
+     */
+    void fixCopy(Packet * &pkt, PacketList* &writebacks)
+    {
+    }
+
+    /**
+     * Called at end of simulation to complete average block reference stats.
+     */
+    virtual void cleanupRefs();
+};
+
+#endif
diff --git a/src/mem/cache/tags/split_lru.cc b/src/mem/cache/tags/split_lru.cc
new file mode 100644 (file)
index 0000000..ea5b92d
--- /dev/null
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+/**
+ * @file
+ * Definitions of an LRU tag store for a partitioned cache.
+ */
+
+#include <string>
+
+#include "mem/cache/base_cache.hh"
+#include "base/intmath.hh"
+#include "mem/cache/tags/split_lru.hh"
+#include "sim/root.hh"
+
+using namespace std;
+
+SplitBlk*
+SplitCacheSet::findBlk(int asid, Addr tag) const
+{
+    for (int i = 0; i < assoc; ++i) {
+        if (blks[i]->tag == tag && blks[i]->isValid()) {
+            return blks[i];
+        }
+    }
+    return 0;
+}
+
+
+void
+SplitCacheSet::moveToHead(SplitBlk *blk)
+{
+    // nothing to do if blk is already head
+    if (blks[0] == blk)
+        return;
+
+    // write 'next' block into blks[i], moving up from MRU toward LRU
+    // until we overwrite the block we moved to head.
+
+    // start by setting up to write 'blk' into blks[0]
+    int i = 0;
+    SplitBlk *next = blk;
+
+    do {
+        assert(i < assoc);
+        // swap blks[i] and next
+        SplitBlk *tmp = blks[i];
+        blks[i] = next;
+        next = tmp;
+        ++i;
+    } while (next != blk);
+}
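+
+// Example: with assoc = 4 and blks = [A, B, C, D] in MRU..LRU order,
+// moveToHead(C) rotates the leading entries to give [C, A, B, D].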
+
+
+// create and initialize a LRU/MRU cache structure
+SplitLRU::SplitLRU(int _numSets, int _blkSize, int _assoc, int _hit_latency, int _part) :
+    numSets(_numSets), blkSize(_blkSize), assoc(_assoc), hitLatency(_hit_latency), part(_part)
+{
+    // Check parameters
+    if (blkSize < 4 || !isPowerOf2(blkSize)) {
+        fatal("Block size must be at least 4 and a power of 2");
+    }
+    if (numSets <= 0 || !isPowerOf2(numSets)) {
+        fatal("# of sets must be non-zero and a power of 2");
+    }
+    if (assoc <= 0) {
+        fatal("associativity must be greater than zero");
+    }
+    if (hitLatency <= 0) {
+        fatal("access latency must be greater than zero");
+    }
+
+    SplitBlk  *blk;
+    int i, j, blkIndex;
+
+    blkMask = blkSize - 1;
+    setShift = floorLog2(blkSize);
+    setMask = numSets - 1;
+    tagShift = setShift + floorLog2(numSets);
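+
+    // Worked example with hypothetical parameters: _numSets = 128,
+    // _blkSize = 32 and _assoc = 8 give setShift = 5, setMask = 0x7f,
+    // blkMask = 0x1f, tagShift = 12 and warmupBound = 1024 blocks.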
+    warmedUp = false;
+    /** @todo Make warmup percentage a parameter. */
+    warmupBound = numSets * assoc;
+
+    sets = new SplitCacheSet[numSets];
+    blks = new SplitBlk[numSets * assoc];
+    // allocate data storage in one big chunk
+    dataBlks = new uint8_t[numSets*assoc*blkSize];
+
+    blkIndex = 0;      // index into blks array
+    for (i = 0; i < numSets; ++i) {
+        sets[i].assoc = assoc;
+
+        sets[i].blks = new SplitBlk*[assoc];
+
+        // link in the data blocks
+        for (j = 0; j < assoc; ++j) {
+            // locate next cache block
+            blk = &blks[blkIndex];
+            blk->data = &dataBlks[blkSize*blkIndex];
+            ++blkIndex;
+
+            // invalidate new cache block
+            blk->status = 0;
+
+            // EGH Fix Me: do we need to initialize blk?
+
+            // Setting the tag to j is just to prevent long chains in the hash
+            // table; won't matter because the block is invalid
+            blk->tag = j;
+            blk->whenReady = 0;
+            blk->req->asid = -1;
+            blk->isTouched = false;
+            blk->size = blkSize;
+            sets[i].blks[j]=blk;
+            blk->set = i;
+            blk->part = part;
+        }
+    }
+}
+
+SplitLRU::~SplitLRU()
+{
+    delete [] dataBlks;
+    delete [] blks;
+    delete [] sets;
+}
+
+void
+SplitLRU::regStats(const std::string &name)
+{
+    BaseTags::regStats(name);
+
+    hits
+        .name(name + ".hits")
+        .desc("number of hits on this partition")
+        .precision(0)
+        ;
+
+    misses
+        .name(name + ".misses")
+        .desc("number of misses in this partition")
+        .precision(0)
+        ;
+}
+
+// probe cache for presence of given block.
+bool
+SplitLRU::probe(int asid, Addr addr) const
+{
+    Addr tag = extractTag(addr);
+    unsigned myset = extractSet(addr);
+
+    SplitBlk *blk = sets[myset].findBlk(asid, tag);
+
+    return (blk != NULL);      // true if in cache
+}
+
+SplitBlk*
+SplitLRU::findBlock(Addr addr, int asid, int &lat)
+{
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    SplitBlk *blk = sets[set].findBlk(asid, tag);
+    lat = hitLatency;
+    if (blk != NULL) {
+        // move this block to head of the MRU list
+        sets[set].moveToHead(blk);
+        if (blk->whenReady > curTick && blk->whenReady - curTick > hitLatency){
+            lat = blk->whenReady - curTick;
+        }
+        blk->refCount += 1;
+        hits++;
+    }
+
+    return blk;
+}
+
+SplitBlk*
+SplitLRU::findBlock(Packet * &pkt, int &lat)
+{
+    Addr addr = pkt->paddr;
+    int asid = pkt->req->asid;
+
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    SplitBlk *blk = sets[set].findBlk(asid, tag);
+    lat = hitLatency;
+    if (blk != NULL) {
+        // move this block to head of the MRU list
+        sets[set].moveToHead(blk);
+        if (blk->whenReady > curTick && blk->whenReady - curTick > hitLatency){
+            lat = blk->whenReady - curTick;
+        }
+        blk->refCount += 1;
+        hits++;
+    }
+
+    return blk;
+}
+
+SplitBlk*
+SplitLRU::findBlock(Addr addr, int asid) const
+{
+    Addr tag = extractTag(addr);
+    unsigned set = extractSet(addr);
+    SplitBlk *blk = sets[set].findBlk(asid, tag);
+    return blk;
+}
+
+SplitBlk*
+SplitLRU::findReplacement(Packet * &pkt, PacketList* &writebacks,
+                          BlkList &compress_blocks)
+{
+    unsigned set = extractSet(pkt->paddr);
+    // grab a replacement candidate
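+    // (the LRU victim, i.e. the tail of the list; it is moved to MRU right
+    // away because the caller is about to refill it with the incoming block)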
+    SplitBlk *blk = sets[set].blks[assoc-1];
+    sets[set].moveToHead(blk);
+    if (blk->isValid()) {
+        int thread_num = (blk->xc) ? blk->xc->getThreadNum() : 0;
+        replacements[thread_num]++;
+        totalRefs += blk->refCount;
+        ++sampledRefs;
+        blk->refCount = 0;
+    } else if (!blk->isTouched) {
+        tagsInUse++;
+        blk->isTouched = true;
+        if (!warmedUp && tagsInUse.value() >= warmupBound) {
+            warmedUp = true;
+            warmupCycle = curTick;
+        }
+    }
+
+    misses++;
+
+    return blk;
+}
+
+void
+SplitLRU::invalidateBlk(int asid, Addr addr)
+{
+    SplitBlk *blk = findBlock(addr, asid);
+    if (blk) {
+        blk->status = 0;
+        blk->isTouched = false;
+        tagsInUse--;
+    }
+}
+
+void
+SplitLRU::doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks)
+{
+    assert(source == blkAlign(source));
+    assert(dest == blkAlign(dest));
+    SplitBlk *source_blk = findBlock(source, asid);
+    assert(source_blk);
+    SplitBlk *dest_blk = findBlock(dest, asid);
+    if (dest_blk == NULL) {
+        // Need to do a replacement
+        Packet * pkt = new Packet();
+        pkt->paddr = dest;
+        BlkList dummy_list;
+        dest_blk = findReplacement(pkt, writebacks, dummy_list);
+        if (dest_blk->isValid() && dest_blk->isModified()) {
+            // Need to writeback data.
+            pkt = buildWritebackReq(regenerateBlkAddr(dest_blk->tag,
+                                                      dest_blk->set),
+                                    dest_blk->req->asid,
+                                    dest_blk->xc,
+                                    blkSize,
+                                    (cache->doData())?dest_blk->data:0,
+                                    dest_blk->size);
+            writebacks.push_back(pkt);
+        }
+        dest_blk->tag = extractTag(dest);
+        dest_blk->req->asid = asid;
+        /**
+         * @todo Do we need to pass in the execution context, or can we
+         * assume it's the same?
+         */
+        assert(source_blk->xc);
+        dest_blk->xc = source_blk->xc;
+    }
+    /**
+     * @todo Can't assume the status once we have coherence on copies.
+     */
+
+    // Set this block as readable, writeable, and dirty.
+    dest_blk->status = 7;
+    if (cache->doData()) {
+        memcpy(dest_blk->data, source_blk->data, blkSize);
+    }
+}
+
+void
+SplitLRU::cleanupRefs()
+{
+    for (int i = 0; i < numSets*assoc; ++i) {
+        if (blks[i].isValid()) {
+            totalRefs += blks[i].refCount;
+            ++sampledRefs;
+        }
+    }
+}
diff --git a/src/mem/cache/tags/split_lru.hh b/src/mem/cache/tags/split_lru.hh
new file mode 100644 (file)
index 0000000..1c0fc86
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2004-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Lisa Hsu
+ */
+
+/**
+ * @file
+ * Declaration of an LRU tag store for a partitioned cache.
+ */
+
+#ifndef __SPLIT_LRU_HH__
+#define __SPLIT_LRU_HH__
+
+#include <list>
+
+#include "mem/cache/cache_blk.hh" // base class
+#include "mem/cache/tags/split_blk.hh"
+#include "mem/packet.hh" // for inlined functions
+#include <assert.h>
+#include "mem/cache/tags/base_tags.hh"
+
+class BaseCache;
+
+/**
+ * An associative set of cache blocks.
+ */
+class SplitCacheSet
+{
+  public:
+    /** The associativity of this set. */
+    int assoc;
+
+    /** Cache blocks in this set, maintained in LRU order 0 = MRU. */
+    SplitBlk **blks;
+
+    /**
+     * Find a block matching the tag in this set.
+     * @param asid The address space ID.
+     * @param tag The Tag to find.
+     * @return Pointer to the block if found.
+     */
+    SplitBlk* findBlk(int asid, Addr tag) const;
+
+    /**
+     * Move the given block to the head of the list.
+     * @param blk The block to move.
+     */
+    void moveToHead(SplitBlk *blk);
+};
+
+/**
+ * An LRU cache tag store.
+ */
+class SplitLRU : public BaseTags
+{
+  public:
+    /** Typedef the block type used in this tag store. */
+    typedef SplitBlk BlkType;
+    /** Typedef for a list of pointers to the local block class. */
+    typedef std::list<SplitBlk*> BlkList;
+  protected:
+    /** The number of sets in the cache. */
+    const int numSets;
+    /** The number of bytes in a block. */
+    const int blkSize;
+    /** The associativity of the cache. */
+    const int assoc;
+    /** The hit latency. */
+    const int hitLatency;
+    /** indicator for which partition this is */
+    const int part;
+
+    /** The cache sets. */
+    SplitCacheSet *sets;
+
+    /** The cache blocks. */
+    SplitBlk *blks;
+    /** The data blocks, 1 per cache block. */
+    uint8_t *dataBlks;
+
+    /** The amount to shift the address to get the set. */
+    int setShift;
+    /** The amount to shift the address to get the tag. */
+    int tagShift;
+    /** Mask out all bits that aren't part of the set index. */
+    unsigned setMask;
+    /** Mask out all bits that aren't part of the block offset. */
+    unsigned blkMask;
+
+    /** number of hits in this partition */
+    Stats::Scalar<> hits;
+    /** number of blocks brought into this partition (i.e. misses) */
+    Stats::Scalar<> misses;
+
+  public:
+    /**
+     * Construct and initialize this tag store.
+     * @param _numSets The number of sets in the cache.
+     * @param _blkSize The number of bytes in a block.
+     * @param _assoc The associativity of the cache.
+     * @param _hit_latency The latency in cycles for a hit.
+     * @param _part The partition ID of this tag store.
+     */
+    SplitLRU(int _numSets, int _blkSize, int _assoc, int _hit_latency, int _part);
+
+    /**
+     * Destructor
+     */
+    virtual ~SplitLRU();
+
+    /**
+     * Register the statistics for this object
+     * @param name The name to precede the stat
+     */
+    void regStats(const std::string &name);
+
+    /**
+     * Return the block size.
+     * @return the block size.
+     */
+    int getBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Return the subblock size. In the case of LRU it is always the block
+     * size.
+     * @return The block size.
+     */
+    int getSubBlockSize()
+    {
+        return blkSize;
+    }
+
+    /**
+     * Search for the address in the cache.
+     * @param asid The address space ID.
+     * @param addr The address to find.
+     * @return True if the address is in the cache.
+     */
+    bool probe(int asid, Addr addr) const;
+
+    /**
+     * Invalidate the block containing the given address.
+     * @param asid The address space ID.
+     * @param addr The address to invalidate.
+     */
+    void invalidateBlk(int asid, Addr addr);
+
+    /**
+     * Finds the given address in the cache and update replacement data.
+     * Returns the access latency as a side effect.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @param lat The access latency.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Addr addr, int asid, int &lat);
+
+    /**
+     * Finds the given address in the cache and update replacement data.
+     * Returns the access latency as a side effect.
+     * @param pkt The packet whose block to find.
+     * @param lat The access latency.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Packet * &pkt, int &lat);
+
+    /**
+     * Finds the given address in the cache, do not update replacement data.
+     * @param addr The address to find.
+     * @param asid The address space ID.
+     * @return Pointer to the cache block if found.
+     */
+    SplitBlk* findBlock(Addr addr, int asid) const;
+
+    /**
+     * Find a replacement block for the address provided.
+     * @param pkt The packet to find a replacement candidate for.
+     * @param writebacks List for any writebacks to be performed.
+     * @param compress_blocks List of blocks to compress, for adaptive comp.
+     * @return The block to place the replacement in.
+     */
+    SplitBlk* findReplacement(Packet * &pkt, PacketList* &writebacks,
+                            BlkList &compress_blocks);
+
+    /**
+     * Generate the tag from the given address.
+     * @param addr The address to get the tag from.
+     * @return The tag of the address.
+     */
+    Addr extractTag(Addr addr) const
+    {
+        return (addr >> tagShift);
+    }
+
+    /**
+     * Generate the tag from the given address.
+     * @param addr The address to get the tag from.
+     * @param blk Ignored.
+     * @return The tag of the address.
+     */
+    Addr extractTag(Addr addr, SplitBlk *blk) const
+    {
+        return (addr >> tagShift);
+    }
+
+    /**
+     * Calculate the set index from the address.
+     * @param addr The address to get the set from.
+     * @return The set index of the address.
+     */
+    int extractSet(Addr addr) const
+    {
+        return ((addr >> setShift) & setMask);
+    }
+
+    /**
+     * Get the block offset from an address.
+     * @param addr The address to get the offset of.
+     * @return The block offset.
+     */
+    int extractBlkOffset(Addr addr) const
+    {
+        return (addr & blkMask);
+    }
+
+    /**
+     * Align an address to the block size.
+     * @param addr the address to align.
+     * @return The block address.
+     */
+    Addr blkAlign(Addr addr) const
+    {
+        return (addr & ~(Addr)blkMask);
+    }
+
+    /**
+     * Regenerate the block address from the tag.
+     * @param tag The tag of the block.
+     * @param set The set of the block.
+     * @return The block address.
+     */
+    Addr regenerateBlkAddr(Addr tag, unsigned set) const
+    {
+        return ((tag << tagShift) | ((Addr)set << setShift));
+    }
+
+    /**
+     * Return the hit latency.
+     * @return the hit latency.
+     */
+    int getHitLatency() const
+    {
+        return hitLatency;
+    }
+
+    /**
+     * Read the data out of the internal storage of the given cache block.
+     * @param blk The cache block to read.
+     * @param data The buffer to read the data into.
+     * @return The cache block's data.
+     */
+    void readData(SplitBlk *blk, uint8_t *data)
+    {
+        memcpy(data, blk->data, blk->size);
+    }
+
+    /**
+     * Write data into the internal storage of the given cache block. Since
+     * LRU does not store data differently, this just needs to update the size.
+     * @param blk The cache block to write.
+     * @param data The data to write.
+     * @param size The number of bytes to write.
+     * @param writebacks A list for any writebacks to be performed. May be
+     * needed when writing to a compressed block.
+     */
+    void writeData(SplitBlk *blk, uint8_t *data, int size,
+                   PacketList* & writebacks)
+    {
+        assert(size <= blkSize);
+        blk->size = size;
+    }
+
+    /**
+     * Perform a block aligned copy from the source address to the destination.
+     * @param source The block-aligned source address.
+     * @param dest The block-aligned destination address.
+     * @param asid The address space ID.
+     * @param writebacks List for any generated writeback requests.
+     */
+    void doCopy(Addr source, Addr dest, int asid, PacketList* &writebacks);
+
+    /**
+     * No impl.
+     */
+    void fixCopy(Packet * &pkt, PacketList* &writebacks)
+    {
+    }
+
+    /**
+     * Called at end of simulation to complete average block reference stats.
+     */
+    virtual void cleanupRefs();
+};
+
+#endif