Port: Make getAddrRanges const
[gem5.git] / src / mem / cache / cache_impl.hh
index 257b3ef33ec8ba2c12284138d8e9ad99129329b2..f7901261ff6f7e238ac3d649a21853b2d73d5f09 100644 (file)
@@ -1,5 +1,18 @@
 /*
+ * Copyright (c) 2010-2012 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder.  You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
  * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * Copyright (c) 2010 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * Cache definitions.
  */
 
-#include "sim/host.hh"
 #include "base/misc.hh"
-#include "base/range_ops.hh"
-
+#include "base/range.hh"
+#include "base/types.hh"
+#include "debug/Cache.hh"
+#include "debug/CachePort.hh"
+#include "mem/cache/prefetch/base.hh"
+#include "mem/cache/blk.hh"
 #include "mem/cache/cache.hh"
-#include "mem/cache/cache_blk.hh"
-#include "mem/cache/miss/mshr.hh"
-#include "mem/cache/prefetch/base_prefetcher.hh"
-
-#include "sim/sim_exit.hh" // for SimExitEvent
-
+#include "mem/cache/mshr.hh"
+#include "sim/sim_exit.hh"
 
 template<class TagStore>
-Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
+Cache<TagStore>::Cache(const Params *p, TagStore *tags)
     : BaseCache(p),
-      prefetchAccess(p->prefetch_access),
       tags(tags),
-      prefetcher(pf),
+      prefetcher(p->prefetcher),
       doFastWrites(true),
-      prefetchMiss(p->prefetch_miss)
+      prefetchOnAccess(p->prefetch_on_access)
 {
     tempBlock = new BlkType();
     tempBlock->data = new uint8_t[blkSize];
 
     cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
-                                  "CpuSidePort",
-                                  p->cpu_side_filter_ranges);
+                                  "CpuSidePort");
     memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
-                                  "MemSidePort",
-                                  p->mem_side_filter_ranges);
-    cpuSidePort->setOtherPort(memSidePort);
-    memSidePort->setOtherPort(cpuSidePort);
+                                  "MemSidePort");
 
     tags->setCache(this);
-    prefetcher->setCache(this);
+    if (prefetcher)
+        prefetcher->setCache(this);
 }
 
 template<class TagStore>
@@ -80,37 +88,8 @@ Cache<TagStore>::regStats()
 {
     BaseCache::regStats();
     tags->regStats(name());
-    prefetcher->regStats(name());
-}
-
-template<class TagStore>
-Port *
-Cache<TagStore>::getPort(const std::string &if_name, int idx)
-{
-    if (if_name == "" || if_name == "cpu_side") {
-        return cpuSidePort;
-    } else if (if_name == "mem_side") {
-        return memSidePort;
-    } else if (if_name == "functional") {
-        return new CpuSidePort(name() + "-cpu_side_funcport", this,
-                               "CpuSideFuncPort",
-                               std::vector<Range<Addr> >());
-    } else {
-        panic("Port name %s unrecognized\n", if_name);
-    }
 }
 
-template<class TagStore>
-void
-Cache<TagStore>::deletePortRefs(Port *p)
-{
-    if (cpuSidePort == p || memSidePort == p)
-        panic("Can only delete functional ports\n");
-
-    delete p;
-}
-
-
 template<class TagStore>
 void
 Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
@@ -144,23 +123,27 @@ Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
             panic("Invalid size for conditional read/write\n");
     }
 
-    if (overwrite_mem)
+    if (overwrite_mem) {
         std::memcpy(blk_data, &overwrite_val, pkt->getSize());
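+        // the block contents now differ from memory, so mark it dirty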
+        blk->status |= BlkDirty;
+    }
 }
 
 
 template<class TagStore>
 void
-Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
+Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
+                                       bool deferred_response,
+                                       bool pending_downgrade)
 {
-    assert(blk);
+    assert(blk && blk->isValid());
     // Occasionally this is not true... if we are a lower-level cache
     // satisfying a string of Read and ReadEx requests from
     // upper-level caches, a Read will mark the block as shared but we
     // can satisfy a following ReadEx anyway since we can rely on the
     // Read requester(s) to have buffered the ReadEx snoop and to
     // invalidate their blocks after receiving them.
-    // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
+    // assert(!pkt->needsExclusive() || blk->isWritable());
     assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
 
     // Check RMW operations first since both isRead() and
@@ -169,11 +152,11 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
         cmpAndSwap(blk, pkt);
     } else if (pkt->isWrite()) {
         if (blk->checkWrite(pkt)) {
-            blk->status |= BlkDirty;
             pkt->writeDataToBlock(blk->data, blkSize);
+            blk->status |= BlkDirty;
         }
     } else if (pkt->isRead()) {
-        if (pkt->isLocked()) {
+        if (pkt->isLLSC()) {
             blk->trackLoadLocked(pkt);
         }
         pkt->setDataFromBlock(blk->data, blkSize);
@@ -181,20 +164,50 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
             // special handling for coherent block requests from
             // upper-level caches
             if (pkt->needsExclusive()) {
-                // on ReadExReq we give up our copy
+                // if we have a dirty copy, make sure the recipient
+                // keeps it marked dirty
+                if (blk->isDirty()) {
+                    pkt->assertMemInhibit();
+                }
+                // on ReadExReq we give up our copy unconditionally
                 tags->invalidateBlk(blk);
+            } else if (blk->isWritable() && !pending_downgrade
+                       && !pkt->sharedAsserted()) {
+                // we can give the requester an exclusive copy (by not
+                // asserting shared line) on a read request if:
+                // - we have an exclusive copy at this level (& below)
+                // - we don't have a pending snoop from below
+                //   signaling another read request
+                // - no other cache above has a copy (otherwise it
+                //   would have asserted shared line on request)
+
+                if (blk->isDirty()) {
+                    // special considerations if we're the owner:
+                    if (!deferred_response && !isTopLevel) {
+                        // if we are responding immediately and can
+                        // signal that we're transferring ownership
+                        // along with exclusivity, do so
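+                        // (the requester sees memInhibit and marks
+                        // its own copy dirty; see handleFill())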
+                        pkt->assertMemInhibit();
+                        blk->status &= ~BlkDirty;
+                    } else {
+                        // if we're responding after our own miss,
+                        // there's a window where the recipient didn't
+                        // know it was getting ownership and may not
+                        // have responded to snoops correctly, so we
+                        // can't pass off ownership *or* exclusivity
+                        pkt->assertShared();
+                    }
+                }
             } else {
-                // on ReadReq we create shareable copies here and in
-                // the requester
+                // otherwise only respond with a shared copy
                 pkt->assertShared();
-                blk->status &= ~BlkWritable;
             }
         }
     } else {
         // Not a read or write... must be an upgrade.  It's OK
         // to just ack those as long as we have an exclusive
         // copy at this level.
-        assert(pkt->cmd == MemCmd::UpgradeReq);
+        assert(pkt->isUpgrade());
         tags->invalidateBlk(blk);
     }
 }
@@ -209,9 +222,9 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
 
 template<class TagStore>
 void
-Cache<TagStore>::markInService(MSHR *mshr)
+Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
 {
-    markInServiceInternal(mshr);
+    markInServiceInternal(mshr, pkt);
 #if 0
         if (mshr->originalCmd == MemCmd::HardPFReq) {
             DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
@@ -256,81 +269,89 @@ Cache<TagStore>::squash(int threadNum)
 
 template<class TagStore>
 bool
-Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk, int &lat)
+Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
+                        int &lat, PacketList &writebacks)
 {
-    if (pkt->req->isUncacheable())  {
+    if (pkt->req->isUncacheable()) {
+        if (pkt->req->isClearLL()) {
+            tags->clearLocks();
+        } else if (pkt->isWrite()) {
+            blk = tags->findBlock(pkt->getAddr());
+            if (blk != NULL) {
+                tags->invalidateBlk(blk);
+            }
+        }
+
         blk = NULL;
         lat = hitLatency;
         return false;
     }
 
-    bool satisfied = false;  // assume the worst
-    blk = tags->findBlock(pkt->getAddr(), lat);
-
-    if (prefetchAccess) {
-        //We are determining prefetches on access stream, call prefetcher
-        prefetcher->handleMiss(pkt, curTick);
-    }
+    int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
+    blk = tags->accessBlock(pkt->getAddr(), lat, id);
 
-    DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), pkt->getAddr(),
-            (blk) ? "hit" : "miss");
+    DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
+            pkt->req->isInstFetch() ? " (ifetch)" : "",
+            pkt->getAddr(), (blk) ? "hit" : "miss");
 
     if (blk != NULL) {
-        // HIT
-        if (blk->isPrefetch()) {
-            //Signal that this was a hit under prefetch (no need for
-            //use prefetch (only can get here if true)
-            DPRINTF(HWPrefetch, "Hit a block that was prefetched\n");
-            blk->status &= ~BlkHWPrefetched;
-            if (prefetchMiss) {
-                //If we are using the miss stream, signal the
-                //prefetcher otherwise the access stream would have
-                //already signaled this hit
-                prefetcher->handleMiss(pkt, curTick);
-            }
-        }
 
-        if (pkt->needsExclusive() ? blk->isWritable() : blk->isValid()) {
+        if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
             // OK to satisfy access
-            hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
-            satisfied = true;
+            incHitCount(pkt);
             satisfyCpuSideRequest(pkt, blk);
-        } else if (pkt->cmd == MemCmd::Writeback) {
-            // special case: writeback to read-only block (e.g., from
-            // L1 into L2).  since we're really just passing ownership
-            // from one cache to another, we can update this cache to
-            // be the owner without making the block writeable
-            assert(!blk->isWritable() /* && !blk->isDirty() */);
-            assert(blkSize == pkt->getSize());
-            std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
-            blk->status |= BlkDirty;
-            satisfied = true;
-            // nothing else to do; writeback doesn't expect response
-            assert(!pkt->needsResponse());
-        } else {
-            // permission violation... nothing to do here, leave unsatisfied
-            // for statistics purposes this counts like a complete miss
-            incMissCount(pkt);
+            return true;
         }
-    } else {
-        // complete miss (no matching block)
-        incMissCount(pkt);
+    }
 
-        if (pkt->isLocked() && pkt->isWrite()) {
-            // miss on store conditional... just give up now
-            pkt->req->setExtraData(0);
-            satisfied = true;
+    // Can't satisfy access normally... either no block (blk == NULL)
+    // or have block but need exclusive & only have shared.
+
+    // Writeback handling is a special case.  We can write the block
+    // into the cache without having a writable copy (or any copy at
+    // all).
+    if (pkt->cmd == MemCmd::Writeback) {
+        assert(blkSize == pkt->getSize());
+        if (blk == NULL) {
+            // need to do a replacement
+            blk = allocateBlock(pkt->getAddr(), writebacks);
+            if (blk == NULL) {
+                // no replaceable block available, give up.
+                // the writeback will be forwarded to the next level.
+                incMissCount(pkt);
+                return false;
+            }
+            int id = pkt->req->masterId();
+            tags->insertBlock(pkt->getAddr(), blk, id);
+            blk->status = BlkValid | BlkReadable;
+        }
+        std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
+        blk->status |= BlkDirty;
+        if (pkt->isSupplyExclusive()) {
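+            // the writeback carried the only copy in the system (see
+            // writebackBlk()), so our copy can be marked writable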
+            blk->status |= BlkWritable;
         }
+        // nothing else to do; writeback doesn't expect response
+        assert(!pkt->needsResponse());
+        incHitCount(pkt);
+        return true;
+    }
+
+    incMissCount(pkt);
+
+    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
+        // complete miss on store conditional... just give up now
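+        // (an extra-data value of zero reports the failure back to
+        // the CPU)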
+        pkt->req->setExtraData(0);
+        return true;
     }
 
-    return satisfied;
+    return false;
 }
 
 
 class ForwardResponseRecord : public Packet::SenderState
 {
     Packet::SenderState *prevSenderState;
-    int prevSrc;
+    PortID prevSrc;
 #ifndef NDEBUG
     BaseCache *cache;
 #endif
@@ -357,14 +378,31 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
 //@todo Add back in MemDebug Calls
 //    MemDebug::cacheAccess(pkt);
 
+
+    /// @todo temporary hack to deal with a memory corruption issue
+    /// until 4-phase transactions are complete
+    for (int x = 0; x < pendingDelete.size(); x++)
+        delete pendingDelete[x];
+    pendingDelete.clear();
+
     // we charge hitLatency for doing just about anything here
-    Tick time =  curTick + hitLatency;
+    Tick time = curTick() + hitLatency;
 
     if (pkt->isResponse()) {
         // must be cache-to-cache response from upper to lower level
         ForwardResponseRecord *rec =
             dynamic_cast<ForwardResponseRecord *>(pkt->senderState);
-        assert(rec != NULL);
+
+        if (rec == NULL) {
+            assert(pkt->cmd == MemCmd::HardPFResp);
+            // Check if it's a prefetch response and handle it. We shouldn't
+            // get any other kinds of responses without FRRs.
+            DPRINTF(Cache, "Got prefetch response from above for addr %#x\n",
+                    pkt->getAddr());
+            handleResponse(pkt);
+            return true;
+        }
+
         rec->restore(pkt, this);
         delete rec;
         memSidePort->respond(pkt, time);
@@ -385,13 +423,29 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
             Packet *snoopPkt = new Packet(pkt, true);  // clear flags
             snoopPkt->setExpressSnoop();
             snoopPkt->assertMemInhibit();
-            memSidePort->sendTiming(snoopPkt);
+            memSidePort->sendTimingReq(snoopPkt);
             // main memory will delete snoopPkt
         }
+        // since we're the official target but we aren't responding,
+        // the packet is now ours to dispose of.
+
+        /// @todo nominally we should just delete the packet here;
+        /// however, until the 4-phase protocol is complete we can't,
+        /// because the sending cache is still relying on it
+        pendingDelete.push_back(pkt);
         return true;
     }
 
     if (pkt->req->isUncacheable()) {
+        if (pkt->req->isClearLL()) {
+            tags->clearLocks();
+        } else if (pkt->isWrite()) {
+            BlkType *blk = tags->findBlock(pkt->getAddr());
+            if (blk != NULL) {
+                tags->invalidateBlk(blk);
+            }
+        }
+
         // writes go in write buffer, reads use MSHR
         if (pkt->isWrite() && !pkt->isRead()) {
             allocateWriteBuffer(pkt, time, true);
@@ -403,29 +457,16 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
     }
 
     int lat = hitLatency;
-    bool satisfied = false;
-
-    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
-    MSHR *mshr = mshrQueue.findMatch(blk_addr);
+    BlkType *blk = NULL;
+    PacketList writebacks;
 
-    if (!mshr) {
-        // no outstanding access to this block, look up in cache
-        // (otherwise if we allow reads while there's an outstanding
-        // write miss, the read could return stale data out of the
-        // cache block... a more aggressive system could detect the
-        // overlap (if any) and forward data out of the MSHRs, but we
-        // don't do that yet)
-        BlkType *blk = NULL;
-        satisfied = access(pkt, blk, lat);
-    }
+    bool satisfied = access(pkt, blk, lat, writebacks);
 
 #if 0
-    PacketList writebacks;
+    /** @todo make the fast write alloc (wh64) work with coherence. */
 
     // If this is a block size write/hint (WH64) allocate the block here
     // if the coherence protocol allows it.
-    /** @todo make the fast write alloc (wh64) work with coherence. */
-    /** @todo Do we want to do fast writes for writebacks as well? */
     if (!blk && pkt->getSize() >= blkSize && coherence->allowFastWrites() &&
         (pkt->cmd == MemCmd::WriteReq
          || pkt->cmd == MemCmd::WriteInvalidateReq) ) {
@@ -441,34 +482,41 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
             ++fastWrites;
         }
     }
-
-    // copy writebacks to write buffer
-    while (!writebacks.empty()) {
-        PacketPtr wbPkt = writebacks.front();
-        allocateWriteBuffer(wbPkt, time, true);
-        writebacks.pop_front();
-    }
 #endif
 
+    // track time of availability of next prefetch, if any
+    Tick next_pf_time = 0;
+
     bool needsResponse = pkt->needsResponse();
 
     if (satisfied) {
+        if (prefetcher && (prefetchOnAccess || (blk && blk->wasPrefetched()))) {
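+            // clear the prefetched bit on a demand hit so the block
+            // is only treated as a prefetch hit once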
+            if (blk)
+                blk->status &= ~BlkHWPrefetched;
+            next_pf_time = prefetcher->notify(pkt, time);
+        }
+
         if (needsResponse) {
             pkt->makeTimingResponse();
-            cpuSidePort->respond(pkt, curTick+lat);
+            cpuSidePort->respond(pkt, curTick()+lat);
         } else {
-            delete pkt;
+            /// @todo nominally we should just delete the packet here;
+            /// however, until the 4-phase protocol is complete we
+            /// can't, because the sending cache is still relying on it
+            pendingDelete.push_back(pkt);
         }
     } else {
         // miss
-        if (prefetchMiss)
-            prefetcher->handleMiss(pkt, time);
+
+        Addr blk_addr = blockAlign(pkt->getAddr());
+        MSHR *mshr = mshrQueue.findMatch(blk_addr);
 
         if (mshr) {
             // MSHR hit
             //@todo remove hw_pf here
-            mshr_hits[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
-            if (mshr->threadNum != 0/*pkt->req->getThreadNum()*/) {
+            assert(pkt->req->masterId() < system->maxMasters());
+            mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
+            if (mshr->threadNum != 0/*pkt->req->threadId()*/) {
                 mshr->threadNum = -1;
             }
             mshr->allocateTarget(pkt, time, order++);
@@ -476,28 +524,64 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
                 noTargetMSHR = mshr;
                 setBlocked(Blocked_NoTargets);
                 // need to be careful with this... if this mshr isn't
-                // ready yet (i.e. time > curTick_, we don't want to
+                // ready yet (i.e. time > curTick()), we don't want to
                 // move it ahead of mshrs that are ready
                 // mshrQueue.moveToFront(mshr);
             }
         } else {
             // no MSHR
-            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
+            assert(pkt->req->masterId() < system->maxMasters());
+            mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
             // always mark as cache fill for now... if we implement
             // no-write-allocate or bypass accesses this will have to
             // be changed.
             if (pkt->cmd == MemCmd::Writeback) {
                 allocateWriteBuffer(pkt, time, true);
             } else {
+                if (blk && blk->isValid()) {
+                    // If we have a write miss to a valid block, we
+                    // need to mark the block non-readable.  Otherwise
+                    // if we allow reads while there's an outstanding
+                    // write miss, the read could return stale data
+                    // out of the cache block... a more aggressive
+                    // system could detect the overlap (if any) and
+                    // forward data out of the MSHRs, but we don't do
+                    // that yet.  Note that we do need to leave the
+                    // block valid so that it stays in the cache, in
+                    // case we get an upgrade response (and hence no
+                    // new data) when the write miss completes.
+                    // As long as CPUs do proper store/load forwarding
+                    // internally, and have a sufficiently weak memory
+                    // model, this is probably unnecessary, but at some
+                    // point it must have seemed like we needed it...
+                    assert(pkt->needsExclusive() && !blk->isWritable());
+                    blk->status &= ~BlkReadable;
+                }
+
                 allocateMissBuffer(pkt, time, true);
             }
+
+            if (prefetcher) {
+                next_pf_time = prefetcher->notify(pkt, time);
+            }
         }
     }
 
+    if (next_pf_time != 0)
+        requestMemSideBus(Request_PF, std::max(time, next_pf_time));
+
+    // copy writebacks to write buffer
+    while (!writebacks.empty()) {
+        PacketPtr wbPkt = writebacks.front();
+        allocateWriteBuffer(wbPkt, time, true);
+        writebacks.pop_front();
+    }
+
     return true;
 }
 
 
+// See comment in cache.hh.
 template<class TagStore>
 PacketPtr
 Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
@@ -506,18 +590,15 @@ Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
     bool blkValid = blk && blk->isValid();
 
     if (cpu_pkt->req->isUncacheable()) {
-        assert(blk == NULL);
+        //assert(blk == NULL);
         return NULL;
     }
 
     if (!blkValid &&
-        (cpu_pkt->cmd == MemCmd::Writeback ||
-         cpu_pkt->cmd == MemCmd::UpgradeReq)) {
-            // For now, writebacks from upper-level caches that
-            // completely miss in the cache just go through. If we had
-            // "fast write" support (where we could write the whole
-            // block w/o fetching new data) we might want to allocate
-            // on writeback misses instead.
+        (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
+        // Writebacks that weren't allocated in access() and upgrades
+        // from upper-level caches that missed completely just go
+        // through.
         return NULL;
     }
 
@@ -533,12 +614,12 @@ Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
         // only reason to be here is that blk is shared
         // (read-only) and we need exclusive
         assert(needsExclusive && !blk->isWritable());
-        cmd = MemCmd::UpgradeReq;
+        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
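+        // an SCUpgradeReq fails if the block is invalidated while the
+        // request is queued in the MSHR; see the SCUpgradeFailReq
+        // handling in getTimingPacket()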
     } else {
         // block is invalid
         cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
     }
-    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, Packet::Broadcast, blkSize);
+    PacketPtr pkt = new Packet(cpu_pkt->req, cmd, blkSize);
 
     pkt->allocate();
     return pkt;
@@ -583,53 +664,79 @@ Cache<TagStore>::atomicAccess(PacketPtr pkt)
     // access in timing mode
 
     BlkType *blk = NULL;
+    PacketList writebacks;
 
-    if (!access(pkt, blk, lat)) {
+    if (!access(pkt, blk, lat, writebacks)) {
         // MISS
-        PacketPtr busPkt = getBusPacket(pkt, blk, pkt->needsExclusive());
+        PacketPtr bus_pkt = getBusPacket(pkt, blk, pkt->needsExclusive());
 
-        bool isCacheFill = (busPkt != NULL);
+        bool is_forward = (bus_pkt == NULL);
 
-        if (busPkt == NULL) {
+        if (is_forward) {
             // just forwarding the same request to the next level
             // no local cache operation involved
-            busPkt = pkt;
+            bus_pkt = pkt;
         }
 
         DPRINTF(Cache, "Sending an atomic %s for %x\n",
-                busPkt->cmdString(), busPkt->getAddr());
+                bus_pkt->cmdString(), bus_pkt->getAddr());
 
 #if TRACING_ON
         CacheBlk::State old_state = blk ? blk->status : 0;
 #endif
 
-        lat += memSidePort->sendAtomic(busPkt);
+        lat += memSidePort->sendAtomic(bus_pkt);
 
         DPRINTF(Cache, "Receive response: %s for addr %x in state %i\n",
-                busPkt->cmdString(), busPkt->getAddr(), old_state);
-
-        bool is_error = busPkt->isError();
-        assert(!busPkt->wasNacked());
-
-        if (is_error && pkt->needsResponse()) {
-            pkt->makeAtomicResponse();
-            pkt->copyError(busPkt);
-        } else if (isCacheFill && !is_error) {
-            PacketList writebacks;
-            blk = handleFill(busPkt, blk, writebacks);
-            satisfyCpuSideRequest(pkt, blk);
-            delete busPkt;
-
-            // Handle writebacks if needed
-            while (!writebacks.empty()){
-                PacketPtr wbPkt = writebacks.front();
-                memSidePort->sendAtomic(wbPkt);
-                writebacks.pop_front();
-                delete wbPkt;
+                bus_pkt->cmdString(), bus_pkt->getAddr(), old_state);
+
+        assert(!bus_pkt->wasNacked());
+
+        // If packet was a forward, the response (if any) is already
+        // in place in the bus_pkt == pkt structure, so we don't need
+        // to do anything.  Otherwise, use the separate bus_pkt to
+        // generate response to pkt and then delete it.
+        if (!is_forward) {
+            if (pkt->needsResponse()) {
+                assert(bus_pkt->isResponse());
+                if (bus_pkt->isError()) {
+                    pkt->makeAtomicResponse();
+                    pkt->copyError(bus_pkt);
+                } else if (bus_pkt->isRead() ||
+                           bus_pkt->cmd == MemCmd::UpgradeResp) {
+                    // we're updating cache state to allow us to
+                    // satisfy the upstream request from the cache
+                    blk = handleFill(bus_pkt, blk, writebacks);
+                    satisfyCpuSideRequest(pkt, blk);
+                } else {
+                    // we're satisfying the upstream request without
+                    // modifying cache state, e.g., a write-through
+                    pkt->makeAtomicResponse();
+                }
             }
+            delete bus_pkt;
         }
     }
 
+    // Note that we don't invoke the prefetcher at all in atomic mode.
+    // It's not clear how to do it properly, particularly for
+    // prefetchers that aggressively generate prefetch candidates and
+    // rely on bandwidth contention to throttle them; these will tend
+    // to pollute the cache in atomic mode since there is no bandwidth
+    // contention.  If we ever do want to enable prefetching in atomic
+    // mode, though, this is the place to do it... see timingAccess()
+    // for an example (though we'd want to issue the prefetch(es)
+    // immediately rather than calling requestMemSideBus() as we do
+    // there).
+
+    // Handle writebacks if needed
+    while (!writebacks.empty()) {
+        PacketPtr wbPkt = writebacks.front();
+        memSidePort->sendAtomic(wbPkt);
+        writebacks.pop_front();
+        delete wbPkt;
+    }
+
     // We now have the block one way or another (hit or completed miss)
 
     if (pkt->needsResponse()) {
@@ -642,28 +749,57 @@ Cache<TagStore>::atomicAccess(PacketPtr pkt)
 
 template<class TagStore>
 void
-Cache<TagStore>::functionalAccess(PacketPtr pkt,
-                                  CachePort *incomingPort,
-                                  CachePort *otherSidePort)
+Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
 {
-    Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
+    Addr blk_addr = blockAlign(pkt->getAddr());
     BlkType *blk = tags->findBlock(pkt->getAddr());
+    MSHR *mshr = mshrQueue.findMatch(blk_addr);
 
     pkt->pushLabel(name());
 
     CacheBlkPrintWrapper cbpw(blk);
-    bool done =
-        (blk && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data))
-        || incomingPort->checkFunctional(pkt)
+
+    // Note that just because an L2/L3 has valid data doesn't mean an
+    // L1 doesn't have a more up-to-date modified copy that still
+    // needs to be found.  As a result we always update the request if
+    // we have it, but only declare it satisfied if we are the owner.
+
+    // see if we have data at all (owned or otherwise)
+    bool have_data = blk && blk->isValid()
+        && pkt->checkFunctional(&cbpw, blk_addr, blkSize, blk->data);
+
+    // data we have is dirty if marked as such or if valid & ownership
+    // pending due to outstanding UpgradeReq
+    bool have_dirty =
+        have_data && (blk->isDirty() ||
+                      (mshr && mshr->inService && mshr->isPendingDirty()));
+
+    bool done = have_dirty
+        || cpuSidePort->checkFunctional(pkt)
         || mshrQueue.checkFunctional(pkt, blk_addr)
         || writeBuffer.checkFunctional(pkt, blk_addr)
-        || otherSidePort->checkFunctional(pkt);
+        || memSidePort->checkFunctional(pkt);
+
+    DPRINTF(Cache, "functional %s %x %s%s%s\n",
+            pkt->cmdString(), pkt->getAddr(),
+            (blk && blk->isValid()) ? "valid " : "",
+            have_data ? "data " : "", done ? "done " : "");
 
     // We're leaving the cache, so pop cache->name() label
     pkt->popLabel();
 
-    if (!done) {
-        otherSidePort->sendFunctional(pkt);
+    if (done) {
+        pkt->makeResponse();
+    } else {
+        // if it came as a request from the CPU side then make sure it
+        // continues towards the memory side
+        if (fromCpuSide) {
+            memSidePort->sendFunctional(pkt);
+        } else if (forwardSnoops && cpuSidePort->isSnooping()) {
+            // if it came from the memory side, it must be a snoop request
+            // and we should only forward it if we are forwarding snoops
+            cpuSidePort->sendFunctionalSnoop(pkt);
+        }
     }
 }
 
@@ -679,7 +815,7 @@ template<class TagStore>
 void
 Cache<TagStore>::handleResponse(PacketPtr pkt)
 {
-    Tick time = curTick + hitLatency;
+    Tick time = curTick() + hitLatency;
     MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
     bool is_error = pkt->isError();
 
@@ -711,18 +847,23 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
     MSHR::Target *initial_tgt = mshr->getTarget();
     BlkType *blk = tags->findBlock(pkt->getAddr());
     int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
-    Tick miss_latency = curTick - initial_tgt->recvTime;
+    Tick miss_latency = curTick() - initial_tgt->recvTime;
     PacketList writebacks;
 
     if (pkt->req->isUncacheable()) {
-        mshr_uncacheable_lat[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
+        assert(pkt->req->masterId() < system->maxMasters());
+        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
             miss_latency;
     } else {
-        mshr_miss_latency[stats_cmd_idx][0/*pkt->req->getThreadNum()*/] +=
+        assert(pkt->req->masterId() < system->maxMasters());
+        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
             miss_latency;
     }
 
-    if (mshr->isCacheFill && !is_error) {
+    bool is_fill = !mshr->isForward &&
+        (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp);
+
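+    // the response fills the cache unless the MSHR was simply
+    // forwarding a request; an UpgradeResp carries permissions rather
+    // than data but still updates the block state via handleFill()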
+    if (is_fill && !is_error) {
         DPRINTF(Cache, "Block for addr %x being updated in Cache\n",
                 pkt->getAddr());
 
@@ -743,10 +884,12 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
     while (mshr->hasTargets()) {
         MSHR::Target *target = mshr->getTarget();
 
-        if (target->isCpuSide()) {
+        switch (target->source) {
+          case MSHR::Target::FromCPU:
             Tick completion_time;
-            if (blk != NULL) {
-                satisfyCpuSideRequest(target->pkt, blk);
+            if (is_fill) {
+                satisfyCpuSideRequest(target->pkt, blk,
+                                      true, mshr->hasPostDowngrade());
                 // How many bytes past the first request is this one
                 int transfer_offset =
                     target->pkt->getOffset(blkSize) - initial_offset;
@@ -759,8 +902,17 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
                     (transfer_offset ? pkt->finishTime : pkt->firstWordTime);
 
                 assert(!target->pkt->req->isUncacheable());
-                missLatency[target->pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/] +=
+
+                assert(target->pkt->req->masterId() < system->maxMasters());
+                missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] +=
                     completion_time - target->recvTime;
+            } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
+                // failed StoreCond upgrade
+                assert(target->pkt->cmd == MemCmd::StoreCondReq ||
+                       target->pkt->cmd == MemCmd::StoreCondFailReq ||
+                       target->pkt->cmd == MemCmd::SCUpgradeFailReq);
+                completion_time = tags->getHitLatency() + pkt->finishTime;
+                target->pkt->req->setExtraData(0);
             } else {
                 // not a cache fill, just forwarding response
                 completion_time = tags->getHitLatency() + pkt->finishTime;
@@ -772,19 +924,55 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
             // if this packet is an error copy that to the new packet
             if (is_error)
                 target->pkt->copyError(pkt);
+            if (target->pkt->cmd == MemCmd::ReadResp &&
+                (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
+                // If intermediate cache got ReadRespWithInvalidate,
+                // propagate that.  Response should not have
+                // isInvalidate() set otherwise.
+                target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
+            }
             cpuSidePort->respond(target->pkt, completion_time);
-        } else {
+            break;
+
+          case MSHR::Target::FromPrefetcher:
+            assert(target->pkt->cmd == MemCmd::HardPFReq);
+            if (blk)
+                blk->status |= BlkHWPrefetched;
+            delete target->pkt->req;
+            delete target->pkt;
+            break;
+
+          case MSHR::Target::FromSnoop:
             // I don't believe that a snoop can be in an error state
             assert(!is_error);
             // response to snoop request
             DPRINTF(Cache, "processing deferred snoop...\n");
-            handleSnoop(target->pkt, blk, true, true);
+            assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
+            handleSnoop(target->pkt, blk, true, true,
+                        mshr->hasPostInvalidate());
+            break;
+
+          default:
+            panic("Illegal target->source enum %d\n", target->source);
         }
 
         mshr->popTarget();
     }
 
+    if (blk) {
+        if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
+            tags->invalidateBlk(blk);
+        } else if (mshr->hasPostDowngrade()) {
+            blk->status &= ~BlkWritable;
+        }
+    }
+
     if (mshr->promoteDeferredTargets()) {
+        // avoid later read getting stale data while write miss is
+        // outstanding.. see comment in timingAccess()
+        if (blk) {
+            blk->status &= ~BlkReadable;
+        }
         MSHRQueue *mq = mshr->queue;
         mq->markPending(mshr);
         requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
@@ -806,6 +994,7 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
         if (blk->isDirty()) {
             allocateWriteBuffer(writebackBlk(blk), time, true);
         }
+        blk->status &= ~BlkValid;
         tags->invalidateBlk(blk);
     }
 
@@ -821,11 +1010,15 @@ Cache<TagStore>::writebackBlk(BlkType *blk)
 {
     assert(blk && blk->isValid() && blk->isDirty());
 
-    writebacks[0/*pkt->req->getThreadNum()*/]++;
+    writebacks[Request::wbMasterId]++;
 
     Request *writebackReq =
-        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0);
-    PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback, -1);
+        new Request(tags->regenerateBlkAddr(blk->tag, blk->set), blkSize, 0,
+                Request::wbMasterId);
+    PacketPtr writeback = new Packet(writebackReq, MemCmd::Writeback);
+    if (blk->isWritable()) {
+        writeback->setSupplyExclusive();
+    }
     writeback->allocate();
     std::memcpy(writeback->getPtr<uint8_t>(), blk->data, blkSize);
 
@@ -834,6 +1027,39 @@ Cache<TagStore>::writebackBlk(BlkType *blk)
 }
 
 
+template<class TagStore>
+typename Cache<TagStore>::BlkType*
+Cache<TagStore>::allocateBlock(Addr addr, PacketList &writebacks)
+{
+    BlkType *blk = tags->findVictim(addr, writebacks);
+
+    if (blk->isValid()) {
+        Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
+        MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
+        if (repl_mshr) {
+            // must be an outstanding upgrade request on block
+            // we're about to replace...
+            assert(!blk->isWritable());
+            assert(repl_mshr->needsExclusive());
+            // too hard to replace block with transient state
+            // allocation failed, block not inserted
+            return NULL;
+        } else {
+            DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
+                    repl_addr, addr,
+                    blk->isDirty() ? "writeback" : "clean");
+
+            if (blk->isDirty()) {
+                // Save writeback packet for handling by caller
+                writebacks.push_back(writebackBlk(blk));
+            }
+        }
+    }
+
+    return blk;
+}
+
+
 // Note that the reason we return a list of writebacks rather than
 // inserting them directly in the write buffer is that this function
 // is called by both atomic and timing-mode accesses, and in atomic
@@ -851,50 +1077,46 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
 
     if (blk == NULL) {
         // better have read new data...
-        assert(pkt->isRead());
-
+        assert(pkt->hasData());
         // need to do a replacement
-        blk = tags->findReplacement(addr, writebacks);
-        if (blk->isValid()) {
-            Addr repl_addr = tags->regenerateBlkAddr(blk->tag, blk->set);
-            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr);
-            if (repl_mshr) {
-                // must be an outstanding upgrade request on block
-                // we're about to replace...
-                assert(!blk->isWritable());
-                assert(repl_mshr->needsExclusive());
-                // too hard to replace block with transient state;
-                // just use temporary storage to complete the current
-                // request and then get rid of it
-                assert(!tempBlock->isValid());
-                blk = tempBlock;
-                tempBlock->set = tags->extractSet(addr);
-                DPRINTF(Cache, "using temp block for %x\n", addr);
-            } else {
-                DPRINTF(Cache, "replacement: replacing %x with %x: %s\n",
-                        repl_addr, addr,
-                        blk->isDirty() ? "writeback" : "clean");
-
-                if (blk->isDirty()) {
-                    // Save writeback packet for handling by caller
-                    writebacks.push_back(writebackBlk(blk));
-                }
-            }
+        blk = allocateBlock(addr, writebacks);
+        if (blk == NULL) {
+            // No replaceable block... just use temporary storage to
+            // complete the current request and then get rid of it
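+            // (tempBlock lives outside the tag array; handleResponse()
+            // writes it back if dirty and invalidates it once the
+            // request completes)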
+            assert(!tempBlock->isValid());
+            blk = tempBlock;
+            tempBlock->set = tags->extractSet(addr);
+            tempBlock->tag = tags->extractTag(addr);
+            DPRINTF(Cache, "using temp block for %x\n", addr);
+        } else {
+            int id = pkt->req->masterId();
+            tags->insertBlock(pkt->getAddr(), blk, id);
         }
 
-        blk->tag = tags->extractTag(addr);
+        // starting from scratch with a new block
+        blk->status = 0;
     } else {
         // existing block... probably an upgrade
         assert(blk->tag == tags->extractTag(addr));
         // either we're getting new data or the block should already be valid
-        assert(pkt->isRead() || blk->isValid());
+        assert(pkt->hasData() || blk->isValid());
+        // don't clear block status... if block is already dirty we
+        // don't want to lose that
     }
 
+    blk->status |= BlkValid | BlkReadable;
+
     if (!pkt->sharedAsserted()) {
-        blk->status = BlkValid | BlkWritable;
-    } else {
-        assert(!pkt->needsExclusive());
-        blk->status = BlkValid;
+        blk->status |= BlkWritable;
+        // If we got this via cache-to-cache transfer (i.e., from a
+        // cache that was an owner) and took away that owner's copy,
+        // then we need to write it back.  Normally this happens
+        // anyway as a side effect of getting a copy to write it, but
+        // there are cases (such as failed store conditionals or
+        // compare-and-swaps) where we'll demand an exclusive copy but
+        // end up not writing it.
+        if (pkt->memInhibitAsserted())
+            blk->status |= BlkDirty;
     }
 
     DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
@@ -919,64 +1141,78 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
 
 template<class TagStore>
 void
-Cache<TagStore>::doTimingSupplyResponse(PacketPtr req_pkt,
-                                        uint8_t *blk_data,
-                                        bool already_copied)
+Cache<TagStore>::
+doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
+                       bool already_copied, bool pending_inval)
 {
     // timing-mode snoop responses require a new packet, unless we
     // already made a copy...
-    PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt, true);
-    if (!req_pkt->isInvalidate()) {
-        // note that we're ignoring the shared flag on req_pkt... it's
-        // basically irrelevant, as we'll always assert shared unless
-        // it's an exclusive request, in which case the shared line
-        // should never be asserted1
-        pkt->assertShared();
-    }
+    PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
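+    // handleSnoop() asserts the shared line on any non-invalidating
+    // snoop before we get here, hence the assertion below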
+    assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
     pkt->allocate();
     pkt->makeTimingResponse();
     if (pkt->isRead()) {
         pkt->setDataFromBlock(blk_data, blkSize);
     }
-    memSidePort->respond(pkt, curTick + hitLatency);
+    if (pkt->cmd == MemCmd::ReadResp && pending_inval) {
+        // Assume we defer a response to a read from a far-away cache
+        // A, then later defer a ReadExcl from a cache B on the same
+        // bus as us.  We'll assert MemInhibit in both cases, but in
+        // the latter case MemInhibit will keep the invalidation from
+        // reaching cache A.  This special response tells cache A that
+        // it gets the block to satisfy its read, but must immediately
+        // invalidate it.
+        pkt->cmd = MemCmd::ReadRespWithInvalidate;
+    }
+    memSidePort->respond(pkt, curTick() + hitLatency);
 }
 
 template<class TagStore>
 void
 Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
-                             bool is_timing, bool is_deferred)
+                             bool is_timing, bool is_deferred,
+                             bool pending_inval)
 {
+    // deferred snoops can only happen in timing mode
+    assert(!(is_deferred && !is_timing));
+    // pending_inval only makes sense on deferred snoops
+    assert(!(pending_inval && !is_deferred));
     assert(pkt->isRequest());
 
-    // first propagate snoop upward to see if anyone above us wants to
-    // handle it.  save & restore packet src since it will get
-    // rewritten to be relative to cpu-side bus (if any)
-    bool alreadyResponded = pkt->memInhibitAsserted();
-    if (is_timing) {
-        Packet *snoopPkt = new Packet(pkt, true);  // clear flags
-        snoopPkt->setExpressSnoop();
-        snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
-        cpuSidePort->sendTiming(snoopPkt);
-        if (snoopPkt->memInhibitAsserted()) {
-            // cache-to-cache response from some upper cache
-            assert(!alreadyResponded);
-            pkt->assertMemInhibit();
+    // the packet may get modified if we or a forwarded snooper
+    // responds in atomic mode, so remember a few things about the
+    // original packet up front
+    bool invalidate = pkt->isInvalidate();
+    bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
+
+    if (forwardSnoops) {
+        // first propagate snoop upward to see if anyone above us wants to
+        // handle it.  save & restore packet src since it will get
+        // rewritten to be relative to cpu-side bus (if any)
+        bool alreadyResponded = pkt->memInhibitAsserted();
+        if (is_timing) {
+            Packet snoopPkt(pkt, true);  // clear flags
+            snoopPkt.setExpressSnoop();
+            snoopPkt.senderState = new ForwardResponseRecord(pkt, this);
+            cpuSidePort->sendTimingSnoopReq(&snoopPkt);
+            if (snoopPkt.memInhibitAsserted()) {
+                // cache-to-cache response from some upper cache
+                assert(!alreadyResponded);
+                pkt->assertMemInhibit();
+            } else {
+                delete snoopPkt.senderState;
+            }
+            if (snoopPkt.sharedAsserted()) {
+                pkt->assertShared();
+            }
         } else {
-            delete snoopPkt->senderState;
-        }
-        if (snoopPkt->sharedAsserted()) {
-            pkt->assertShared();
-        }
-        delete snoopPkt;
-    } else {
-        int origSrc = pkt->getSrc();
-        cpuSidePort->sendAtomic(pkt);
-        if (!alreadyResponded && pkt->memInhibitAsserted()) {
-            // cache-to-cache response from some upper cache:
-            // forward response to original requester
-            assert(pkt->isResponse());
+            cpuSidePort->sendAtomicSnoop(pkt);
+            if (!alreadyResponded && pkt->memInhibitAsserted()) {
+                // cache-to-cache response from some upper cache:
+                // forward response to original requester
+                assert(pkt->isResponse());
+            }
         }
-        pkt->setSrc(origSrc);
     }
 
     if (!blk || !blk->isValid()) {
@@ -988,10 +1224,9 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
     // and then do it later
     bool respond = blk->isDirty() && pkt->needsResponse();
     bool have_exclusive = blk->isWritable();
-    bool invalidate = pkt->isInvalidate();
 
-    if (pkt->isRead() && !pkt->isInvalidate()) {
-        assert(!pkt->needsExclusive());
+    if (pkt->isRead() && !invalidate) {
+        assert(!needs_exclusive);
         pkt->assertShared();
         int bits_to_clear = BlkWritable;
         const bool haveOwnershipState = true; // for now
@@ -1015,11 +1250,16 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
             pkt->setSupplyExclusive();
         }
         if (is_timing) {
-            doTimingSupplyResponse(pkt, blk->data, is_deferred);
+            doTimingSupplyResponse(pkt, blk->data, is_deferred, pending_inval);
         } else {
             pkt->makeAtomicResponse();
             pkt->setDataFromBlock(blk->data, blkSize);
         }
+    } else if (is_timing && is_deferred) {
+        // if it's a deferred timing snoop then we've made a copy of
+        // the packet, and so if we're not using that copy to respond
+        // then we need to delete it here.
+        delete pkt;
     }
 
     // Do this last in case it deallocates block data or something
@@ -1045,7 +1285,7 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
 
     BlkType *blk = tags->findBlock(pkt->getAddr());
 
-    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
+    Addr blk_addr = blockAlign(pkt->getAddr());
     MSHR *mshr = mshrQueue.findMatch(blk_addr);
 
     // Let the MSHR itself track the snoop and decide whether we want
@@ -1065,8 +1305,10 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
                 pkt->getAddr());
 
         // Look through writebacks for any non-uncacheable writes, use that
-        for (int i = 0; i < writebacks.size(); i++) {
-            mshr = writebacks[i];
+        if (writebacks.size()) {
+            // We should only ever find a single match
+            assert(writebacks.size() == 1);
+            mshr = writebacks[0];
             assert(!mshr->isUncacheable());
             assert(mshr->getNumTargets() == 1);
             PacketPtr wb_pkt = mshr->getTarget()->pkt;
@@ -1076,31 +1318,41 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
             pkt->assertMemInhibit();
             if (!pkt->needsExclusive()) {
                 pkt->assertShared();
+                // the writeback is no longer the exclusive copy in the system
+                wb_pkt->clearSupplyExclusive();
             } else {
                 // if we're not asserting the shared line, we need to
                 // invalidate our copy.  we'll do that below as long as
                 // the packet's invalidate flag is set...
                 assert(pkt->isInvalidate());
             }
-            doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(), false);
+            doTimingSupplyResponse(pkt, wb_pkt->getPtr<uint8_t>(),
+                                   false, false);
 
             if (pkt->isInvalidate()) {
                 // Invalidation trumps our writeback... discard here
                 markInService(mshr);
+                delete wb_pkt;
             }
-
-            // If this was a shared writeback, there may still be
-            // other shared copies above that require invalidation.
-            // We could be more selective and return here if the
-            // request is non-exclusive or if the writeback is
-            // exclusive.
-            break;
-        }
+        } // writebacks.size()
     }
 
-    handleSnoop(pkt, blk, true, false);
+    // If this was a shared writeback, there may still be
+    // other shared copies above that require invalidation.
+    // We could be more selective and return here if the
+    // request is non-exclusive or if the writeback is
+    // exclusive.
+    handleSnoop(pkt, blk, true, false, false);
 }
 
+template<class TagStore>
+bool
+Cache<TagStore>::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
+{
+    // Express snoop responses from master to slave, e.g., from L1 to L2
+    cache->timingAccess(pkt);
+    return true;
+}
 
 template<class TagStore>
 Tick
@@ -1113,7 +1365,7 @@ Cache<TagStore>::snoopAtomic(PacketPtr pkt)
     }
 
     BlkType *blk = tags->findBlock(pkt->getAddr());
-    handleSnoop(pkt, blk, false, false);
+    handleSnoop(pkt, blk, false, false, false);
     return hitLatency;
 }
 
@@ -1178,15 +1430,24 @@ Cache<TagStore>::getNextMSHR()
 
     // fall through... no pending requests.  Try a prefetch.
     assert(!miss_mshr && !write_mshr);
-    if (!mshrQueue.isFull()) {
+    if (prefetcher && !mshrQueue.isFull()) {
         // If we have a miss queue slot, we can try a prefetch
         PacketPtr pkt = prefetcher->getPacket();
         if (pkt) {
-            // Update statistic on number of prefetches issued
-            // (hwpf_mshr_misses)
-            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->getThreadNum()*/]++;
-            // Don't request bus, since we already have it
-            return allocateMissBuffer(pkt, curTick, false);
+            Addr pf_addr = blockAlign(pkt->getAddr());
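+            // squash the prefetch if the block is already present,
+            // already being fetched, or pending in the write buffer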
+            if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr) &&
+                                             !writeBuffer.findMatch(pf_addr)) {
+                // Update statistic on number of prefetches issued
+                // (hwpf_mshr_misses)
+                assert(pkt->req->masterId() < system->maxMasters());
+                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
+                // Don't request bus, since we already have it
+                return allocateMissBuffer(pkt, curTick(), false);
+            } else {
+                // free the request and packet
+                delete pkt->req;
+                delete pkt;
+            }
         }
     }
 
@@ -1208,19 +1469,56 @@ Cache<TagStore>::getTimingPacket()
     PacketPtr tgt_pkt = mshr->getTarget()->pkt;
     PacketPtr pkt = NULL;
 
-    if (mshr->isSimpleForward()) {
+    if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq ||
+        tgt_pkt->cmd == MemCmd::StoreCondFailReq) {
+        // SCUpgradeReq or StoreCondReq saw invalidation while queued
+        // in MSHR, so now that we are getting around to processing
+        // it, just treat it as if we got a failure response
+        pkt = new Packet(tgt_pkt);
+        pkt->cmd = MemCmd::UpgradeFailResp;
+        pkt->senderState = mshr;
+        pkt->firstWordTime = pkt->finishTime = curTick();
+        handleResponse(pkt);
+        return NULL;
+    } else if (mshr->isForwardNoResponse()) {
         // no response expected, just forward packet as it is
         assert(tags->findBlock(mshr->addr) == NULL);
         pkt = tgt_pkt;
     } else {
         BlkType *blk = tags->findBlock(mshr->addr);
+
+        if (tgt_pkt->cmd == MemCmd::HardPFReq) {
+            // It might be possible for a writeback to arrive between
+            // the time the prefetch is placed in the MSHRs and when
+            // it's selected to send... if so, this assert will catch
+            // that, and then we'll have to figure out what to do.
+            assert(blk == NULL);
+
+            // We need to check the caches above us to verify that
+            // they don't have a copy of this block in the dirty state
+            // at the moment.  Without this check we could get a stale
+            // copy from memory that might get used in place of the
+            // dirty one.
+            PacketPtr snoop_pkt = new Packet(tgt_pkt, true);
+            snoop_pkt->setExpressSnoop();
+            snoop_pkt->senderState = mshr;
+            cpuSidePort->sendTimingSnoopReq(snoop_pkt);
+
+            if (snoop_pkt->memInhibitAsserted()) {
+                markInService(mshr, snoop_pkt);
+                DPRINTF(Cache, "Upward snoop of prefetch for addr %#x hit\n",
+                        tgt_pkt->getAddr());
+                delete snoop_pkt;
+                return NULL;
+            }
+            delete snoop_pkt;
+        }
+
         pkt = getBusPacket(tgt_pkt, blk, mshr->needsExclusive());
 
-        mshr->isCacheFill = (pkt != NULL);
+        mshr->isForward = (pkt == NULL);
 
-        if (pkt == NULL) {
+        if (mshr->isForward) {
             // not a cache block request, but a response is expected
-            assert(!mshr->isSimpleForward());
             // make copy of current packet to forward, keep current
             // copy for response handling
             pkt = new Packet(tgt_pkt);
@@ -1237,6 +1535,51 @@ Cache<TagStore>::getTimingPacket()
 }
 
 
+template<class TagStore>
+Tick
+Cache<TagStore>::nextMSHRReadyTime()
+{
+    Tick nextReady = std::min(mshrQueue.nextMSHRReadyTime(),
+                              writeBuffer.nextMSHRReadyTime());
+
+    if (prefetcher) {
+        nextReady = std::min(nextReady,
+                             prefetcher->nextPrefetchReadyTime());
+    }
+
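+    // the memory-side packet queue uses this time when scheduling its
+    // next send, so the port wakes up for the next ready MSHR even if
+    // no response is queued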
+    return nextReady;
+}
+
+template<class TagStore>
+void
+Cache<TagStore>::serialize(std::ostream &os)
+{
+    warn("*** Creating checkpoints with caches is not supported. ***\n");
+    warn("    Remove any caches before taking checkpoints\n");
+    warn("    This checkpoint will not restore correctly and dirty data in "
+         "the cache will be lost!\n");
+
+    // Since we don't write back the data dirty in the caches to the physical
+    // memory if caches exist in the system we won't be able to restore
+    // from the checkpoint as any data dirty in the caches will be lost.
+
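+    // leave a sentinel flag in the checkpoint so that unserialize()
+    // can detect the situation and abort the restore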
+    bool bad_checkpoint = true;
+    SERIALIZE_SCALAR(bad_checkpoint);
+}
+
+template<class TagStore>
+void
+Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
+{
+    bool bad_checkpoint;
+    UNSERIALIZE_SCALAR(bad_checkpoint);
+    if (bad_checkpoint) {
+        fatal("Restoring from checkpoints with caches is not supported in the "
+              "classic memory system. Please remove any caches before taking "
+              "checkpoints.\n");
+    }
+}
+
 ///////////////
 //
 // CpuSidePort
@@ -1244,56 +1587,48 @@ Cache<TagStore>::getTimingPacket()
 ///////////////
 
 template<class TagStore>
-void
-Cache<TagStore>::CpuSidePort::
-getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
+AddrRangeList
+Cache<TagStore>::CpuSidePort::getAddrRanges() const
 {
-    // CPU side port doesn't snoop; it's a target only.
-    bool dummy;
-    otherPort->getPeerAddressRanges(resp, dummy);
-    FilterRangeList(filterRanges, resp);
-    snoop = false;
+    return cache->getAddrRanges();
 }
 
-
 template<class TagStore>
 bool
-Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
+Cache<TagStore>::CpuSidePort::recvTimingReq(PacketPtr pkt)
 {
-    // illegal to block responses... can lead to deadlock
-    if (pkt->isRequest() && !pkt->memInhibitAsserted() && blocked) {
+    // always let inhibited requests through even if blocked
+    if (!pkt->memInhibitAsserted() && blocked) {
         DPRINTF(Cache,"Scheduling a retry while blocked\n");
         mustSendRetry = true;
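+        // the actual retry is sent later, once the cache unblocks and
+        // clears its blocked flag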
         return false;
     }
 
-    myCache()->timingAccess(pkt);
+    cache->timingAccess(pkt);
     return true;
 }
 
-
 template<class TagStore>
 Tick
 Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
 {
-    return myCache()->atomicAccess(pkt);
+    // atomic request
+    return cache->atomicAccess(pkt);
 }
 
-
 template<class TagStore>
 void
 Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
 {
-    myCache()->functionalAccess(pkt, this, otherPort);
+    // functional request
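+    // (the boolean argument indicates that the access comes from the
+    // CPU side of the cache)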
+    cache->functionalAccess(pkt, true);
 }
 
-
 template<class TagStore>
 Cache<TagStore>::
 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
-                         const std::string &_label,
-                         std::vector<Range<Addr> > filterRanges)
-    : BaseCache::CachePort(_name, _cache, _label, filterRanges)
+                         const std::string &_label)
+    : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
 {
 }
 
@@ -1303,23 +1638,9 @@ CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
 //
 ///////////////
 
-template<class TagStore>
-void
-Cache<TagStore>::MemSidePort::
-getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
-{
-    otherPort->getPeerAddressRanges(resp, snoop);
-    FilterRangeList(filterRanges, resp);
-
-    // Memory-side port always snoops, so unconditionally set flag for
-    // caller.
-    snoop = true;
-}
-
-
 template<class TagStore>
 bool
-Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
+Cache<TagStore>::MemSidePort::recvTimingResp(PacketPtr pkt)
 {
     // this needs to be fixed so that the cache updates the mshr and sends the
     // packet back out on the link, but it probably won't happen so until this
@@ -1327,60 +1648,48 @@ Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
     if (pkt->wasNacked())
         panic("Need to implement cache resending nacked packets!\n");
 
-    if (pkt->isRequest() && blocked) {
-        DPRINTF(Cache,"Scheduling a retry while blocked\n");
-        mustSendRetry = true;
-        return false;
-    }
-
-    if (pkt->isResponse()) {
-        myCache()->handleResponse(pkt);
-    } else {
-        myCache()->snoopTiming(pkt);
-    }
+    cache->handleResponse(pkt);
     return true;
 }
 
+// Express snooping requests arriving at the memside port
+template<class TagStore>
+void
+Cache<TagStore>::MemSidePort::recvTimingSnoopReq(PacketPtr pkt)
+{
+    // handle snooping requests
+    cache->snoopTiming(pkt);
+}
 
 template<class TagStore>
 Tick
-Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
+Cache<TagStore>::MemSidePort::recvAtomicSnoop(PacketPtr pkt)
 {
-    // in atomic mode, responses go back to the sender via the
-    // function return from sendAtomic(), not via a separate
-    // sendAtomic() from the responder.  Thus we should never see a
-    // response packet in recvAtomic() (anywhere, not just here).
-    assert(!pkt->isResponse());
-    return myCache()->snoopAtomic(pkt);
+    // atomic snoop
+    return cache->snoopAtomic(pkt);
 }
 
-
 template<class TagStore>
 void
-Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
+Cache<TagStore>::MemSidePort::recvFunctionalSnoop(PacketPtr pkt)
 {
-    myCache()->functionalAccess(pkt, this, otherPort);
+    // functional snoop (note that, in contrast to atomic, we don't
+    // have a specific functionalSnoop method in the cache, as the
+    // behaviour is the same regardless of which side the access
+    // originates from)
 }
 
-
-
 template<class TagStore>
 void
-Cache<TagStore>::MemSidePort::sendPacket()
+Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
 {
-    // if we have responses that are ready, they take precedence
+    // if we have a response packet waiting we have to start with that
     if (deferredPacketReady()) {
-        bool success = sendTiming(transmitList.front().pkt);
-
-        if (success) {
-            //send successful, remove packet
-            transmitList.pop_front();
-        }
-
-        waitingOnRetry = !success;
+        // use the normal approach from the timing port
+        trySendTiming();
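+        // (trySendTiming() pops the packet from the transmit list on
+        // success and sets waitingOnRetry otherwise)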
     } else {
-        // check for non-response packets (requests & writebacks)
-        PacketPtr pkt = myCache()->getTimingPacket();
+        // check for request packets (requests & writebacks)
+        PacketPtr pkt = cache.getTimingPacket();
         if (pkt == NULL) {
             // can happen if e.g. we attempt a writeback and fail, but
             // before the retry, the writeback is eliminated because
@@ -1389,66 +1698,40 @@ Cache<TagStore>::MemSidePort::sendPacket()
         } else {
             MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
 
-            bool success = sendTiming(pkt);
+            waitingOnRetry = !masterPort.sendTimingReq(pkt);
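+            // a failed send means the port below is busy and will
+            // send us a retry when it frees up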
 
-            waitingOnRetry = !success;
             if (waitingOnRetry) {
                 DPRINTF(CachePort, "now waiting on a retry\n");
-                if (!mshr->isSimpleForward()) {
+                if (!mshr->isForwardNoResponse()) {
+                    // we are awaiting a retry; delete this packet now,
+                    // as a fresh one will be created when we next get
+                    // the opportunity to send
                     delete pkt;
                 }
+                // note that any requestBus and schedSendEvent calls
+                // are now masked (we will wait for the retry before
+                // doing anything), even if we no longer care about
+                // this packet and might override it before it gets
+                // retried
             } else {
-                myCache()->markInService(mshr);
+                cache.markInService(mshr, pkt);
             }
         }
     }
 
-
-    // tried to send packet... if it was successful (no retry), see if
-    // we need to rerequest bus or not
+    // if we succeeded and are not waiting for a retry, schedule the
+    // next send, not only looking at the response transmit list, but
+    // also considering when the next MSHR is ready
     if (!waitingOnRetry) {
-        Tick nextReady = std::min(deferredPacketReadyTime(),
-                                  myCache()->nextMSHRReadyTime());
-        // @TODO: need to factor in prefetch requests here somehow
-        if (nextReady != MaxTick) {
-            DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
-            sendEvent->schedule(std::max(nextReady, curTick + 1));
-        } else {
-            // no more to send right now: if we're draining, we may be done
-            if (drainEvent) {
-                drainEvent->process();
-                drainEvent = NULL;
-            }
-        }
+        scheduleSend(cache.nextMSHRReadyTime());
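+        // (scheduleSend() picks the earlier of this time and the next
+        // deferred response)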
     }
 }
 
-template<class TagStore>
-void
-Cache<TagStore>::MemSidePort::recvRetry()
-{
-    assert(waitingOnRetry);
-    sendPacket();
-}
-
-
-template<class TagStore>
-void
-Cache<TagStore>::MemSidePort::processSendEvent()
-{
-    assert(!waitingOnRetry);
-    sendPacket();
-}
-
-
 template<class TagStore>
 Cache<TagStore>::
 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
-                         const std::string &_label,
-                         std::vector<Range<Addr> > filterRanges)
-    : BaseCache::CachePort(_name, _cache, _label, filterRanges)
+                         const std::string &_label)
+    : BaseCache::CacheMasterPort(_name, _cache, _queue),
+      _queue(*_cache, *this, _label), cache(_cache)
 {
-    // override default send event from SimpleTimingPort
-    delete sendEvent;
-    sendEvent = new SendEvent(this);
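+    // nothing to do here; the packet queue (_queue) now owns the send
+    // event and its scheduling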
 }