cache: coherence protocol enhancements & bug fixes
[gem5.git] / src / mem / cache / cache_impl.hh
index a78fd363792c510d67707695eb01d4cf9dec966d..8d2806b8d2aeb96cec2da1ac9f8aabee02bdff79 100644 (file)
@@ -1,5 +1,18 @@
 /*
+ * Copyright (c) 2010 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder.  You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
  * Copyright (c) 2002-2005 The Regents of The University of Michigan
+ * Copyright (c) 2010 Advanced Micro Devices, Inc.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * Cache definitions.
  */
 
-#include "sim/host.hh"
 #include "base/fast_alloc.hh"
 #include "base/misc.hh"
-#include "base/range_ops.hh"
-
-#include "mem/cache/cache.hh"
+#include "base/range.hh"
+#include "base/types.hh"
 #include "mem/cache/blk.hh"
+#include "mem/cache/cache.hh"
 #include "mem/cache/mshr.hh"
 #include "mem/cache/prefetch/base.hh"
-
-#include "sim/sim_exit.hh" // for SimExitEvent
-
+#include "sim/sim_exit.hh"
 
 template<class TagStore>
 Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
@@ -62,11 +72,9 @@ Cache<TagStore>::Cache(const Params *p, TagStore *tags, BasePrefetcher *pf)
     tempBlock->data = new uint8_t[blkSize];
 
     cpuSidePort = new CpuSidePort(p->name + "-cpu_side_port", this,
-                                  "CpuSidePort",
-                                  p->cpu_side_filter_ranges);
+                                  "CpuSidePort");
     memSidePort = new MemSidePort(p->name + "-mem_side_port", this,
-                                  "MemSidePort",
-                                  p->mem_side_filter_ranges);
+                                  "MemSidePort");
     cpuSidePort->setOtherPort(memSidePort);
     memSidePort->setOtherPort(cpuSidePort);
 
@@ -96,8 +104,7 @@ Cache<TagStore>::getPort(const std::string &if_name, int idx)
     } else if (if_name == "functional") {
         CpuSidePort *funcPort =
             new CpuSidePort(name() + "-cpu_side_funcport", this,
-                            "CpuSideFuncPort",
-                            std::vector<Range<Addr> >());
+                            "CpuSideFuncPort");
         funcPort->setOtherPort(memSidePort);
         return funcPort;
     } else {
@@ -158,16 +165,18 @@ Cache<TagStore>::cmpAndSwap(BlkType *blk, PacketPtr pkt)
 
 template<class TagStore>
 void
-Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
+Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk,
+                                       bool deferred_response,
+                                       bool pending_downgrade)
 {
-    assert(blk);
+    assert(blk && blk->isValid());
     // Occasionally this is not true... if we are a lower-level cache
     // satisfying a string of Read and ReadEx requests from
     // upper-level caches, a Read will mark the block as shared but we
     // can satisfy a following ReadEx anyway since we can rely on the
     // Read requester(s) to have buffered the ReadEx snoop and to
     // invalidate their blocks after receiving them.
-    // assert(pkt->needsExclusive() ? blk->isWritable() : blk->isValid());
+    // assert(!pkt->needsExclusive() || blk->isWritable());
     assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
 
     // Check RMW operations first since both isRead() and
@@ -175,12 +184,12 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
     if (pkt->cmd == MemCmd::SwapReq) {
         cmpAndSwap(blk, pkt);
     } else if (pkt->isWrite()) {
-        blk->status |= BlkDirty;
         if (blk->checkWrite(pkt)) {
             pkt->writeDataToBlock(blk->data, blkSize);
+            blk->status |= BlkDirty;
         }
     } else if (pkt->isRead()) {
-        if (pkt->isLocked()) {
+        if (pkt->isLLSC()) {
             blk->trackLoadLocked(pkt);
         }
         pkt->setDataFromBlock(blk->data, blkSize);
@@ -188,20 +197,50 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
             // special handling for coherent block requests from
             // upper-level caches
             if (pkt->needsExclusive()) {
-                // on ReadExReq we give up our copy
+                // if we have a dirty copy, make sure the recipient
+                // keeps it marked dirty
+                if (blk->isDirty()) {
+                    pkt->assertMemInhibit();
+                }
+                // on ReadExReq we give up our copy unconditionally
                 tags->invalidateBlk(blk);
+            } else if (blk->isWritable() && !pending_downgrade
+                       && !pkt->sharedAsserted()) {
+                // we can give the requester an exclusive copy (by not
+                // asserting shared line) on a read request if:
+                // - we have an exclusive copy at this level (& below)
+                // - we don't have a pending snoop from below
+                //   signaling another read request
+                // - no other cache above has a copy (otherwise it
+                //   would have asserted shared line on request)
+
+                if (blk->isDirty()) {
+                    // special considerations if we're owner:
+                    if (!deferred_response) {
+                        // if we are responding immediately and can
+                        // signal that we're transferring ownership
+                        // along with exclusivity, do so
+                        pkt->assertMemInhibit();
+                        blk->status &= ~BlkDirty;
+                    } else {
+                        // if we're responding after our own miss,
+                        // there's a window where the recipient didn't
+                        // know it was getting ownership and may not
+                        // have responded to snoops correctly, so we
+                        // can't pass off ownership *or* exclusivity
+                        pkt->assertShared();
+                    }
+                }
             } else {
-                // on ReadReq we create shareable copies here and in
-                // the requester
+                // otherwise only respond with a shared copy
                 pkt->assertShared();
-                blk->status &= ~BlkWritable;
             }
         }
     } else {
         // Not a read or write... must be an upgrade.  It's OK
         // to just ack those as long as we have an exclusive
         // copy at this level.
-        assert(pkt->cmd == MemCmd::UpgradeReq);
+        assert(pkt->isUpgrade());
         tags->invalidateBlk(blk);
     }
 }
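
The read-hit policy above reduces to a small decision table. A minimal
standalone sketch of that table (illustrative names, not the gem5 API;
"deferred" means we are responding after our own miss, "otherSharers"
means an upper-level cache asserted the shared line):

    // Sketch: what a read hit grants the requester.
    enum ReadGrant { GrantOwned, GrantExclusive, GrantShared };

    ReadGrant grantFor(bool writable, bool dirty, bool deferred,
                       bool pendingDowngrade, bool otherSharers)
    {
        if (!writable || pendingDowngrade || otherSharers)
            return GrantShared;      // can only hand out a shared copy
        if (dirty)
            // immediate response: pass ownership along with exclusivity;
            // deferred response: the snoop window forces a shared copy
            return deferred ? GrantShared : GrantOwned;
        return GrantExclusive;       // clean exclusive copy handed over
    }
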
@@ -216,9 +255,9 @@ Cache<TagStore>::satisfyCpuSideRequest(PacketPtr pkt, BlkType *blk)
 
 template<class TagStore>
 void
-Cache<TagStore>::markInService(MSHR *mshr)
+Cache<TagStore>::markInService(MSHR *mshr, PacketPtr pkt)
 {
-    markInServiceInternal(mshr);
+    markInServiceInternal(mshr, pkt);
 #if 0
         if (mshr->originalCmd == MemCmd::HardPFReq) {
             DPRINTF(HWPrefetch, "%s:Marking a HW_PF in service\n",
@@ -266,22 +305,33 @@ bool
 Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
                         int &lat, PacketList &writebacks)
 {
-    if (pkt->req->isUncacheable())  {
+    if (pkt->req->isUncacheable()) {
+        if (pkt->req->isClrex()) {
+            tags->clearLocks();
+        } else {
+            blk = tags->findBlock(pkt->getAddr());
+            if (blk != NULL) {
+                tags->invalidateBlk(blk);
+            }
+        }
+
         blk = NULL;
         lat = hitLatency;
         return false;
     }
 
-    blk = tags->accessBlock(pkt->getAddr(), lat);
+    int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
+    blk = tags->accessBlock(pkt->getAddr(), lat, id);
 
-    DPRINTF(Cache, "%s %x %s\n", pkt->cmdString(), pkt->getAddr(),
-            (blk) ? "hit" : "miss");
+    DPRINTF(Cache, "%s%s %x %s\n", pkt->cmdString(),
+            pkt->req->isInstFetch() ? " (ifetch)" : "",
+            pkt->getAddr(), (blk) ? "hit" : "miss");
 
     if (blk != NULL) {
 
         if (pkt->needsExclusive() ? blk->isWritable() : blk->isReadable()) {
             // OK to satisfy access
-            hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
+            incHitCount(pkt, id);
             satisfyCpuSideRequest(pkt, blk);
             return true;
         }
@@ -301,23 +351,24 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
             if (blk == NULL) {
                 // no replaceable block available, give up.
                 // writeback will be forwarded to next level.
-                incMissCount(pkt);
+                incMissCount(pkt, id);
                 return false;
             }
-            tags->insertBlock(pkt->getAddr(), blk);
+            tags->insertBlock(pkt->getAddr(), blk, id);
             blk->status = BlkValid | BlkReadable;
         }
         std::memcpy(blk->data, pkt->getPtr<uint8_t>(), blkSize);
         blk->status |= BlkDirty;
         // nothing else to do; writeback doesn't expect response
         assert(!pkt->needsResponse());
-        hits[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
+        incHitCount(pkt, id);
         return true;
     }
 
-    incMissCount(pkt);
+    incMissCount(pkt, id);
 
-    if (blk == NULL && pkt->isLocked() && pkt->isWrite()) {
+    if (blk == NULL && pkt->isLLSC() && pkt->isWrite()) {
         // complete miss on store conditional... just give up now
         pkt->req->setExtraData(0);
         return true;
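
A failed store-conditional is reported through the request's extra data
rather than an error response: setExtraData(0) tells the CPU the SC did
not succeed, so a complete miss can be answered without a bus
transaction. A standalone restatement of the convention (the 0/nonzero
encoding is assumed from this call site and the UpgradeFailResp case
later in this patch):

    #include <cstdint>

    // extraData convention for store-conditionals:
    // 0 = failed, nonzero = succeeded.
    bool scSucceeded(uint64_t extraData) { return extraData != 0; }
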
@@ -395,6 +446,15 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
     }
 
     if (pkt->req->isUncacheable()) {
+        if (pkt->req->isClrex()) {
+            tags->clearLocks();
+        } else {
+            BlkType *blk = tags->findBlock(pkt->getAddr());
+            if (blk != NULL) {
+                tags->invalidateBlk(blk);
+            }
+        }
+
         // writes go in write buffer, reads use MSHR
         if (pkt->isWrite() && !pkt->isRead()) {
             allocateWriteBuffer(pkt, time, true);
@@ -454,7 +514,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
     } else {
         // miss
 
-        Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
+        Addr blk_addr = blockAlign(pkt->getAddr());
         MSHR *mshr = mshrQueue.findMatch(blk_addr);
 
         if (mshr) {
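
blockAlign() replaces the open-coded masking idiom used in several
places in this file; for a power-of-two block size it is just the old
expression factored into a helper. A standalone equivalent, assuming
that is all the helper does:

    #include <cstdint>

    // Equivalent of the mask it replaces (blkSize a power of two).
    typedef uint64_t Addr;
    Addr blockAlign(Addr addr, Addr blkSize) { return addr & ~(blkSize - 1); }
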
@@ -534,12 +594,12 @@ Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
     bool blkValid = blk && blk->isValid();
 
     if (cpu_pkt->req->isUncacheable()) {
-        assert(blk == NULL);
+        //assert(blk == NULL);
         return NULL;
     }
 
-    if (!blkValid && (cpu_pkt->cmd == MemCmd::Writeback ||
-                      cpu_pkt->cmd == MemCmd::UpgradeReq)) {
+    if (!blkValid &&
+        (cpu_pkt->cmd == MemCmd::Writeback || cpu_pkt->isUpgrade())) {
         // Writebacks that weren't allocated in access() and upgrades
         // from upper-level caches that missed completely just go
         // through.
@@ -558,7 +618,7 @@ Cache<TagStore>::getBusPacket(PacketPtr cpu_pkt, BlkType *blk,
         // only reason to be here is that blk is shared
         // (read-only) and we need exclusive
         assert(needsExclusive && !blk->isWritable());
-        cmd = MemCmd::UpgradeReq;
+        cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
     } else {
         // block is invalid
         cmd = needsExclusive ? MemCmd::ReadExReq : MemCmd::ReadReq;
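
The bus command follows directly from block state and request type. A
compact standalone restatement of the selection, with a local enum
standing in for MemCmd:

    enum Cmd { ReadReq, ReadExReq, UpgradeReq, SCUpgradeReq };

    // Sketch of the command choice in getBusPacket().
    Cmd pickCmd(bool blkValid, bool needsExclusive, bool isLLSC)
    {
        if (blkValid)  // only reached when a shared copy needs upgrading
            return isLLSC ? SCUpgradeReq : UpgradeReq;
        return needsExclusive ? ReadExReq : ReadReq;
    }
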
@@ -697,7 +757,7 @@ Cache<TagStore>::functionalAccess(PacketPtr pkt,
                                   CachePort *incomingPort,
                                   CachePort *otherSidePort)
 {
-    Addr blk_addr = pkt->getAddr() & ~(blkSize - 1);
+    Addr blk_addr = blockAlign(pkt->getAddr());
     BlkType *blk = tags->findBlock(pkt->getAddr());
 
     pkt->pushLabel(name());
@@ -801,7 +861,8 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
           case MSHR::Target::FromCPU:
             Tick completion_time;
             if (is_fill) {
-                satisfyCpuSideRequest(target->pkt, blk);
+                satisfyCpuSideRequest(target->pkt, blk,
+                                      true, mshr->hasPostDowngrade());
                 // How many bytes past the first request is this one
                 int transfer_offset =
                     target->pkt->getOffset(blkSize) - initial_offset;
@@ -816,6 +877,11 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
                 assert(!target->pkt->req->isUncacheable());
                 missLatency[target->pkt->cmdToIndex()][0/*pkt->req->threadId()*/] +=
                     completion_time - target->recvTime;
+            } else if (target->pkt->cmd == MemCmd::StoreCondReq &&
+                       pkt->cmd == MemCmd::UpgradeFailResp) {
+                // failed StoreCond upgrade
+                completion_time = tags->getHitLatency() + pkt->finishTime;
+                target->pkt->req->setExtraData(0);
             } else {
                 // not a cache fill, just forwarding response
                 completion_time = tags->getHitLatency() + pkt->finishTime;
@@ -827,12 +893,11 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
             // if this packet is an error copy that to the new packet
             if (is_error)
                 target->pkt->copyError(pkt);
-            if (pkt->isInvalidate()) {
+            if (target->pkt->cmd == MemCmd::ReadResp &&
+                (pkt->isInvalidate() || mshr->hasPostInvalidate())) {
                 // If intermediate cache got ReadRespWithInvalidate,
                 // propagate that.  Response should not have
                 // isInvalidate() set otherwise.
-                assert(target->pkt->cmd == MemCmd::ReadResp);
-                assert(pkt->cmd == MemCmd::ReadRespWithInvalidate);
                 target->pkt->cmd = MemCmd::ReadRespWithInvalidate;
             }
             cpuSidePort->respond(target->pkt, completion_time);
@@ -851,8 +916,9 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
             assert(!is_error);
             // response to snoop request
             DPRINTF(Cache, "processing deferred snoop...\n");
+            assert(!(pkt->isInvalidate() && !mshr->hasPostInvalidate()));
             handleSnoop(target->pkt, blk, true, true,
-                        mshr->pendingInvalidate || pkt->isInvalidate());
+                        mshr->hasPostInvalidate());
             break;
 
           default:
@@ -862,14 +928,20 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
         mshr->popTarget();
     }
 
-    if (pkt->isInvalidate()) {
-        tags->invalidateBlk(blk);
+    if (blk) {
+        if (pkt->isInvalidate() || mshr->hasPostInvalidate()) {
+            tags->invalidateBlk(blk);
+        } else if (mshr->hasPostDowngrade()) {
+            blk->status &= ~BlkWritable;
+        }
     }
 
     if (mshr->promoteDeferredTargets()) {
         // avoid later read getting stale data while write miss is
         // outstanding.. see comment in timingAccess()
-        blk->status &= ~BlkReadable;
+        if (blk) {
+            blk->status &= ~BlkReadable;
+        }
         MSHRQueue *mq = mshr->queue;
         mq->markPending(mshr);
         requestMemSideBus((RequestCause)mq->index, pkt->finishTime);
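
Deferred snoop effects recorded in the MSHR are applied to the block
only after all targets have been serviced, with invalidation taking
precedence over downgrade; the new NULL guards cover responses that
never had a block allocated. A standalone sketch of the ordering, with
local flags in place of the Blk* status bits:

    enum { Valid = 1, Readable = 2, Writable = 4, Dirty = 8 };

    // Sketch: block status after deferred snoops are applied.
    unsigned afterSnoops(unsigned status, bool postInval, bool postDowngrade)
    {
        if (postInval)
            return 0;                       // invalidation wins outright
        if (postDowngrade)
            status &= ~unsigned(Writable);  // keep data, lose write permission
        return status;
    }
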
@@ -981,20 +1053,34 @@ Cache<TagStore>::handleFill(PacketPtr pkt, BlkType *blk,
             tempBlock->tag = tags->extractTag(addr);
             DPRINTF(Cache, "using temp block for %x\n", addr);
         } else {
-            tags->insertBlock(addr, blk);
+            int id = pkt->req->hasContextId() ? pkt->req->contextId() : -1;
+            tags->insertBlock(addr, blk, id);
         }
+
+        // starting from scratch with a new block
+        blk->status = 0;
     } else {
         // existing block... probably an upgrade
         assert(blk->tag == tags->extractTag(addr));
         // either we're getting new data or the block should already be valid
         assert(pkt->hasData() || blk->isValid());
+        // don't clear block status... if block is already dirty we
+        // don't want to lose that
     }
 
+    blk->status |= BlkValid | BlkReadable;
+
     if (!pkt->sharedAsserted()) {
-        blk->status = BlkValid | BlkReadable | BlkWritable;
-    } else {
-        assert(!pkt->needsExclusive());
-        blk->status = BlkValid | BlkReadable;
+        blk->status |= BlkWritable;
+        // If we got this via cache-to-cache transfer (i.e., from a
+        // cache that was an owner) and took away that owner's copy,
+        // then we need to write it back.  Normally this happens
+        // anyway as a side effect of getting a copy to write it, but
+        // there are cases (such as failed store conditionals or
+        // compare-and-swaps) where we'll demand an exclusive copy but
+        // end up not writing it.
+        if (pkt->memInhibitAsserted())
+            blk->status |= BlkDirty;
     }
 
     DPRINTF(Cache, "Block addr %x moving from state %i to %i\n",
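
The fill path now composes status bits instead of overwriting them, so
a dirty bit set before an upgrade fill survives, and a cache-to-cache
transfer from an owner (memInhibitAsserted) leaves the line dirty here
so it is eventually written back. A standalone sketch of the
composition, using the same local flags as the sketch above:

    enum { Valid = 1, Readable = 2, Writable = 4, Dirty = 8 };

    // Sketch: block status produced by handleFill().
    unsigned fillStatus(unsigned old, bool freshBlock,
                        bool sharedAsserted, bool memInhibited)
    {
        unsigned s = freshBlock ? 0u : old;  // upgrades keep an old Dirty
        s |= Valid | Readable;
        if (!sharedAsserted) {
            s |= Writable;                   // exclusive copy
            if (memInhibited)
                s |= Dirty;                  // took over an owner's copy
        }
        return s;
    }
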
@@ -1025,14 +1111,8 @@ doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
 {
     // timing-mode snoop responses require a new packet, unless we
     // already made a copy...
-    PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt, true);
-    if (!req_pkt->isInvalidate()) {
-        // note that we're ignoring the shared flag on req_pkt... it's
-        // basically irrelevant, as we'll always assert shared unless
-        // it's an exclusive request, in which case the shared line
-        // should never be asserted1
-        pkt->assertShared();
-    }
+    PacketPtr pkt = already_copied ? req_pkt : new Packet(req_pkt);
+    assert(req_pkt->isInvalidate() || pkt->sharedAsserted());
     pkt->allocate();
     pkt->makeTimingResponse();
     if (pkt->isRead()) {
@@ -1063,35 +1143,43 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
     assert(!(pending_inval && !is_deferred));
     assert(pkt->isRequest());
 
-    // first propagate snoop upward to see if anyone above us wants to
-    // handle it.  save & restore packet src since it will get
-    // rewritten to be relative to cpu-side bus (if any)
-    bool alreadyResponded = pkt->memInhibitAsserted();
-    if (is_timing) {
-        Packet *snoopPkt = new Packet(pkt, true);  // clear flags
-        snoopPkt->setExpressSnoop();
-        snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
-        cpuSidePort->sendTiming(snoopPkt);
-        if (snoopPkt->memInhibitAsserted()) {
-            // cache-to-cache response from some upper cache
-            assert(!alreadyResponded);
-            pkt->assertMemInhibit();
+    // the packet may get modified if we or a forwarded snooper
+    // responds in atomic mode, so remember a few things about the
+    // original packet up front
+    bool invalidate = pkt->isInvalidate();
+    bool M5_VAR_USED needs_exclusive = pkt->needsExclusive();
+
+    if (forwardSnoops) {
+        // first propagate snoop upward to see if anyone above us wants to
+        // handle it.  save & restore packet src since it will get
+        // rewritten to be relative to cpu-side bus (if any)
+        bool alreadyResponded = pkt->memInhibitAsserted();
+        if (is_timing) {
+            Packet *snoopPkt = new Packet(pkt, true);  // clear flags
+            snoopPkt->setExpressSnoop();
+            snoopPkt->senderState = new ForwardResponseRecord(pkt, this);
+            cpuSidePort->sendTiming(snoopPkt);
+            if (snoopPkt->memInhibitAsserted()) {
+                // cache-to-cache response from some upper cache
+                assert(!alreadyResponded);
+                pkt->assertMemInhibit();
+            } else {
+                delete snoopPkt->senderState;
+            }
+            if (snoopPkt->sharedAsserted()) {
+                pkt->assertShared();
+            }
+            delete snoopPkt;
         } else {
-            delete snoopPkt->senderState;
-        }
-        if (snoopPkt->sharedAsserted()) {
-            pkt->assertShared();
-        }
-        delete snoopPkt;
-    } else {
-        int origSrc = pkt->getSrc();
-        cpuSidePort->sendAtomic(pkt);
-        if (!alreadyResponded && pkt->memInhibitAsserted()) {
-            // cache-to-cache response from some upper cache:
-            // forward response to original requester
-            assert(pkt->isResponse());
+            int origSrc = pkt->getSrc();
+            cpuSidePort->sendAtomic(pkt);
+            if (!alreadyResponded && pkt->memInhibitAsserted()) {
+                // cache-to-cache response from some upper cache:
+                // forward response to original requester
+                assert(pkt->isResponse());
+            }
+            pkt->setSrc(origSrc);
         }
-        pkt->setSrc(origSrc);
     }
 
     if (!blk || !blk->isValid()) {
@@ -1103,10 +1191,9 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
     // and then do it later
     bool respond = blk->isDirty() && pkt->needsResponse();
     bool have_exclusive = blk->isWritable();
-    bool invalidate = pkt->isInvalidate();
 
-    if (pkt->isRead() && !pkt->isInvalidate()) {
-        assert(!pkt->needsExclusive());
+    if (pkt->isRead() && !invalidate) {
+        assert(!needs_exclusive);
         pkt->assertShared();
         int bits_to_clear = BlkWritable;
         const bool haveOwnershipState = true; // for now
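
Snapshotting invalidate and needs_exclusive before forwarding matters
because an atomic snoop responder may turn pkt into a response in
place, clearing the request flags. The pattern in isolation (a fragment
whose names mirror the code above):

    bool invalidate = pkt->isInvalidate();        // snapshot first...
    bool needs_exclusive = pkt->needsExclusive();
    cpuSidePort->sendAtomic(pkt);  // ...may morph pkt into a response...
    if (pkt->isRead() && !invalidate) {   // ...so decide from the snapshot
        assert(!needs_exclusive);  // a plain read never needed exclusivity
        pkt->assertShared();
    }
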
@@ -1165,7 +1252,7 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
 
     BlkType *blk = tags->findBlock(pkt->getAddr());
 
-    Addr blk_addr = pkt->getAddr() & ~(Addr(blkSize-1));
+    Addr blk_addr = blockAlign(pkt->getAddr());
     MSHR *mshr = mshrQueue.findMatch(blk_addr);
 
     // Let the MSHR itself track the snoop and decide whether we want
@@ -1300,15 +1387,18 @@ Cache<TagStore>::getNextMSHR()
 
     // fall through... no pending requests.  Try a prefetch.
     assert(!miss_mshr && !write_mshr);
-    if (!mshrQueue.isFull()) {
+    if (prefetcher && !mshrQueue.isFull()) {
         // If we have a miss queue slot, we can try a prefetch
         PacketPtr pkt = prefetcher->getPacket();
         if (pkt) {
-            // Update statistic on number of prefetches issued
-            // (hwpf_mshr_misses)
-            mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
-            // Don't request bus, since we already have it
-            return allocateMissBuffer(pkt, curTick, false);
+            Addr pf_addr = blockAlign(pkt->getAddr());
+            if (!tags->findBlock(pf_addr) && !mshrQueue.findMatch(pf_addr)) {
+                // Update statistic on number of prefetches issued
+                // (hwpf_mshr_misses)
+                mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
+                // Don't request bus, since we already have it
+                return allocateMissBuffer(pkt, curTick, false);
+            }
         }
     }
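
Generated prefetches are now dropped when they would be redundant,
i.e. when the line is already resident or an MSHR is already
outstanding for it. The filter in isolation (a fragment using the same
tags and mshrQueue members as the code above):

    // Sketch: issue a prefetch only if it would actually add something.
    bool worthPrefetching(Addr pf_addr)
    {
        pf_addr = blockAlign(pf_addr);
        return tags->findBlock(pf_addr) == NULL &&    // not already cached
               mshrQueue.findMatch(pf_addr) == NULL;  // not already in flight
    }
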
 
@@ -1330,7 +1420,17 @@ Cache<TagStore>::getTimingPacket()
     PacketPtr tgt_pkt = mshr->getTarget()->pkt;
     PacketPtr pkt = NULL;
 
-    if (mshr->isForwardNoResponse()) {
+    if (tgt_pkt->cmd == MemCmd::SCUpgradeFailReq) {
+        // SCUpgradeReq saw invalidation while queued in MSHR, so now
+        // that we are getting around to processing it, just treat it
+        // as if we got a failure response
+        pkt = new Packet(tgt_pkt);
+        pkt->cmd = MemCmd::UpgradeFailResp;
+        pkt->senderState = mshr;
+        pkt->firstWordTime = pkt->finishTime = curTick;
+        handleResponse(pkt);
+        return NULL;
+    } else if (mshr->isForwardNoResponse()) {
         // no response expected, just forward packet as it is
         assert(tags->findBlock(mshr->addr) == NULL);
         pkt = tgt_pkt;
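
An SCUpgradeReq whose line was invalidated while queued (presumably
demoted to SCUpgradeFailReq by the MSHR's snoop handling, which is not
part of this diff) is failed locally: a synthesized UpgradeFailResp is
fed straight back into handleResponse(), whose StoreCondReq case
earlier in this patch sets the request's extra data to 0, so the CPU
observes a failed store-conditional and retries its LL/SC loop. The
same steps, annotated:

    // Sketch: fail a queued SC upgrade without a bus transaction.
    PacketPtr resp = new Packet(tgt_pkt);     // copy; the target packet
                                              // itself still completes
    resp->cmd = MemCmd::UpgradeFailResp;      // looks like a failed upgrade
    resp->senderState = mshr;                 // routes back to this MSHR
    resp->firstWordTime = resp->finishTime = curTick;  // "arrives" now
    handleResponse(resp);                     // -> setExtraData(0) for SC
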
@@ -1385,11 +1485,10 @@ void
 Cache<TagStore>::CpuSidePort::
 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
 {
-    // CPU side port doesn't snoop; it's a target only.
-    bool dummy;
-    otherPort->getPeerAddressRanges(resp, dummy);
-    FilterRangeList(filterRanges, resp);
+    // CPU side port doesn't snoop; it's a target only.  It can
+    // potentially respond to any address.
     snoop = false;
+    resp.push_back(myCache()->getAddrRange());
 }
 
 
@@ -1428,9 +1527,8 @@ Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
 template<class TagStore>
 Cache<TagStore>::
 CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
-                         const std::string &_label,
-                         std::vector<Range<Addr> > filterRanges)
-    : BaseCache::CachePort(_name, _cache, _label, filterRanges)
+                         const std::string &_label)
+    : BaseCache::CachePort(_name, _cache, _label)
 {
 }
 
@@ -1445,11 +1543,9 @@ void
 Cache<TagStore>::MemSidePort::
 getDeviceAddressRanges(AddrRangeList &resp, bool &snoop)
 {
-    otherPort->getPeerAddressRanges(resp, snoop);
-    FilterRangeList(filterRanges, resp);
-
-    // Memory-side port always snoops, so unconditionally set flag for
-    // caller.
+    // Memory-side port always snoops, but never passes requests
+    // through to targets on the cpu side (so we don't add anything to
+    // the address range list).
     snoop = true;
 }
 
@@ -1535,7 +1631,7 @@ Cache<TagStore>::MemSidePort::sendPacket()
                     delete pkt;
                 }
             } else {
-                myCache()->markInService(mshr);
+                myCache()->markInService(mshr, pkt);
             }
         }
     }
@@ -1552,7 +1648,7 @@ Cache<TagStore>::MemSidePort::sendPacket()
             schedule(sendEvent, std::max(nextReady, curTick + 1));
         } else {
             // no more to send right now: if we're draining, we may be done
-            if (drainEvent) {
+            if (drainEvent && !sendEvent->scheduled()) {
                 drainEvent->process();
                 drainEvent = NULL;
             }
@@ -1581,9 +1677,8 @@ Cache<TagStore>::MemSidePort::processSendEvent()
 template<class TagStore>
 Cache<TagStore>::
 MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
-                         const std::string &_label,
-                         std::vector<Range<Addr> > filterRanges)
-    : BaseCache::CachePort(_name, _cache, _label, filterRanges)
+                         const std::string &_label)
+    : BaseCache::CachePort(_name, _cache, _label)
 {
     // override default send event from SimpleTimingPort
     delete sendEvent;