misc: Replaced master/slave terminology
[gem5.git] / src / mem / cache / base.cc
index 89293430f49c0b6201a18a15ef8efa4024be280d..c42071453819095f86c27a5d9f1a4dfda91d96ba 100644
@@ -36,9 +36,6 @@
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- *          Nikos Nikoleris
  */
 
 /**
 #include "params/WriteAllocator.hh"
 #include "sim/core.hh"
 
-class BaseMasterPort;
-class BaseSlavePort;
-
 using namespace std;
 
-BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
+BaseCache::CacheResponsePort::CacheResponsePort(const std::string &_name,
                                           BaseCache *_cache,
                                           const std::string &_label)
-    : QueuedSlavePort(_name, _cache, queue),
+    : QueuedResponsePort(_name, _cache, queue),
       queue(*_cache, *this, true, _label),
       blocked(false), mustSendRetry(false),
       sendRetryEvent([this]{ processSendRetry(); }, _name)
@@ -81,8 +75,8 @@ BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
 
 BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
     : ClockedObject(p),
-      cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
-      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
+      cpuSidePort (p->name + ".cpu_side_port", this, "CpuSidePort"),
+      memSidePort(p->name + ".mem_side_port", this, "MemSidePort"),
       mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
       writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
       tags(p->tags),
@@ -110,7 +104,8 @@ BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
       noTargetMSHR(nullptr),
       missCount(p->max_miss_count),
       addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
-      system(p->system)
+      system(p->system),
+      stats(*this)
 {
     // the MSHR queue has no reserve entries as we check the MSHR
     // queue on every single allocation, whereas the write queue has
@@ -119,7 +114,7 @@ BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
     // buffer before committing to an MSHR
 
     // forward snoops is overridden in init() once we can query
-    // whether the connected master is actually snooping or not
+    // whether the connected requestor is actually snooping or not
 
     tempBlock = new TempCacheBlk(blkSize);
 
@@ -134,7 +129,7 @@ BaseCache::~BaseCache()
 }
 
 void
-BaseCache::CacheSlavePort::setBlocked()
+BaseCache::CacheResponsePort::setBlocked()
 {
     assert(!blocked);
     DPRINTF(CachePort, "Port is blocking new requests\n");
@@ -149,7 +144,7 @@ BaseCache::CacheSlavePort::setBlocked()
 }
 
 void
-BaseCache::CacheSlavePort::clearBlocked()
+BaseCache::CacheResponsePort::clearBlocked()
 {
     assert(blocked);
     DPRINTF(CachePort, "Port is accepting new requests\n");
@@ -161,7 +156,7 @@ BaseCache::CacheSlavePort::clearBlocked()
 }
 
 void
-BaseCache::CacheSlavePort::processSendRetry()
+BaseCache::CacheResponsePort::processSendRetry()
 {
     DPRINTF(CachePort, "Port is sending retry\n");
 
@@ -275,8 +270,8 @@ BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
                 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                         pkt->print());
 
-                assert(pkt->req->masterId() < system->maxMasters());
-                mshr_hits[pkt->cmdToIndex()][pkt->req->masterId()]++;
+                assert(pkt->req->requestorId() < system->maxRequestors());
+                stats.cmdStats(pkt).mshr_hits[pkt->req->requestorId()]++;
 
                 // We use forward_time here because it is the same
                 // considering new targets. We have multiple
@@ -299,8 +294,8 @@ BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk,
         }
     } else {
         // no MSHR
-        assert(pkt->req->masterId() < system->maxMasters());
-        mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
+        assert(pkt->req->requestorId() < system->maxRequestors());
+        stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
 
         if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
             // We use forward_time here because there is an
@@ -342,11 +337,20 @@ BaseCache::recvTimingReq(PacketPtr pkt)
     // the delay provided by the crossbar
     Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
 
-    // Note that lat is passed by reference here. The function
-    // access() will set the lat value.
     Cycles lat;
     CacheBlk *blk = nullptr;
-    bool satisfied = access(pkt, blk, lat);
+    bool satisfied = false;
+    {
+        PacketList writebacks;
+        // Note that lat is passed by reference here. The function
+        // access() will set the lat value.
+        satisfied = access(pkt, blk, lat, writebacks);
+
+        // After the evicted blocks are selected, they must be forwarded
+        // to the write buffer to ensure they logically precede anything
+        // happening below
+        doWritebacks(writebacks, clockEdge(lat + forwardLatency));
+    }
 
     // Here we charge the headerDelay that takes into account the latencies
     // of the bus, if the packet comes from it.
@@ -434,20 +438,20 @@ BaseCache::recvTimingResp(PacketPtr pkt)
     }
 
     // Initial target is used just for stats
-    QueueEntry::Target *initial_tgt = mshr->getTarget();
-    int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
-    Tick miss_latency = curTick() - initial_tgt->recvTime;
-
+    const QueueEntry::Target *initial_tgt = mshr->getTarget();
+    const Tick miss_latency = curTick() - initial_tgt->recvTime;
     if (pkt->req->isUncacheable()) {
-        assert(pkt->req->masterId() < system->maxMasters());
-        mshr_uncacheable_lat[stats_cmd_idx][pkt->req->masterId()] +=
-            miss_latency;
+        assert(pkt->req->requestorId() < system->maxRequestors());
+        stats.cmdStats(initial_tgt->pkt)
+            .mshr_uncacheable_lat[pkt->req->requestorId()] += miss_latency;
     } else {
-        assert(pkt->req->masterId() < system->maxMasters());
-        mshr_miss_latency[stats_cmd_idx][pkt->req->masterId()] +=
-            miss_latency;
+        assert(pkt->req->requestorId() < system->maxRequestors());
+        stats.cmdStats(initial_tgt->pkt)
+            .mshr_miss_latency[pkt->req->requestorId()] += miss_latency;
     }
 
+    PacketList writebacks;
+
     bool is_fill = !mshr->isForward &&
         (pkt->isRead() || pkt->cmd == MemCmd::UpgradeResp ||
          mshr->wasWholeLineWrite);
@@ -464,7 +468,7 @@ BaseCache::recvTimingResp(PacketPtr pkt)
 
         const bool allocate = (writeAllocator && mshr->wasWholeLineWrite) ?
             writeAllocator->allocate() : mshr->allocOnFill();
-        blk = handleFill(pkt, blk, allocate);
+        blk = handleFill(pkt, blk, writebacks, allocate);
         assert(blk != nullptr);
         ppFill->notify(pkt);
     }
@@ -510,7 +514,7 @@ BaseCache::recvTimingResp(PacketPtr pkt)
 
         // Request the bus for a prefetch if this deallocation freed enough
         // MSHRs for a prefetch to take place
-        if (prefetcher && mshrQueue.canPrefetch()) {
+        if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
             Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
                                          clockEdge());
             if (next_pf_time != MaxTick)
@@ -520,9 +524,13 @@ BaseCache::recvTimingResp(PacketPtr pkt)
 
     // if we used temp block, check to see if it's valid and then clear it out
     if (blk == tempBlock && tempBlock->isValid()) {
-        evictBlock(blk, clockEdge(forwardLatency) + pkt->headerDelay);
+        evictBlock(blk, writebacks);
     }
 
+    const Tick forward_time = clockEdge(forwardLatency) + pkt->headerDelay;
+    // copy writebacks to write buffer
+    doWritebacks(writebacks, forward_time);
+
     DPRINTF(CacheVerbose, "%s: Leaving with %s\n", __func__, pkt->print());
     delete pkt;
 }
@@ -540,7 +548,8 @@ BaseCache::recvAtomic(PacketPtr pkt)
     Cycles lat = lookupLatency;
 
     CacheBlk *blk = nullptr;
-    bool satisfied = access(pkt, blk, lat);
+    PacketList writebacks;
+    bool satisfied = access(pkt, blk, lat, writebacks);
 
     if (pkt->isClean() && blk && blk->isDirty()) {
         // A cache clean operation is looking for a dirty
@@ -550,12 +559,17 @@ BaseCache::recvAtomic(PacketPtr pkt)
         DPRINTF(CacheVerbose, "%s: packet %s found block: %s\n",
                 __func__, pkt->print(), blk->print());
         PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
+        writebacks.push_back(wb_pkt);
         pkt->setSatisfied();
-        doWritebacksAtomic(wb_pkt);
     }
 
+    // handle writebacks resulting from the access here to ensure they
+    // logically precede anything happening below
+    doWritebacksAtomic(writebacks);
+    assert(writebacks.empty());
+
     if (!satisfied) {
-        lat += handleAtomicReqMiss(pkt, blk);
+        lat += handleAtomicReqMiss(pkt, blk, writebacks);
     }
 
     // Note that we don't invoke the prefetcher at all in atomic mode.
@@ -569,6 +583,9 @@ BaseCache::recvAtomic(PacketPtr pkt)
     // immediately rather than calling requestMemSideBus() as we do
     // there).
 
+    // do any writebacks resulting from the response handling
+    doWritebacksAtomic(writebacks);
+
     // if we used temp block, check to see if its valid and if so
     // clear it out, but only do so after the call to recvAtomic is
     // finished so that any downstream observers (such as a snoop
@@ -747,7 +764,7 @@ BaseCache::getNextQueueEntry()
 
     // fall through... no pending requests.  Try a prefetch.
     assert(!miss_mshr && !wq_entry);
-    if (prefetcher && mshrQueue.canPrefetch()) {
+    if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
         // If we have a miss queue slot, we can try a prefetch
         PacketPtr pkt = prefetcher->getPacket();
         if (pkt) {
@@ -757,8 +774,8 @@ BaseCache::getNextQueueEntry()
                 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                 // Update statistic on number of prefetches issued
                 // (hwpf_mshr_misses)
-                assert(pkt->req->masterId() < system->maxMasters());
-                mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
+                assert(pkt->req->requestorId() < system->maxRequestors());
+                stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
 
                 // allocate an MSHR and return it, note
                 // that we send the packet straight away, so do not
@@ -774,9 +791,46 @@ BaseCache::getNextQueueEntry()
     return nullptr;
 }
 
+bool
+BaseCache::handleEvictions(std::vector<CacheBlk*> &evict_blks,
+    PacketList &writebacks)
+{
+    bool replacement = false;
+    for (const auto& blk : evict_blks) {
+        if (blk->isValid()) {
+            replacement = true;
+
+            const MSHR* mshr =
+                mshrQueue.findMatch(regenerateBlkAddr(blk), blk->isSecure());
+            if (mshr) {
+                // Must be an outstanding upgrade or clean request on a block
+                // we're about to replace
+                assert((!blk->isWritable() && mshr->needsWritable()) ||
+                       mshr->isCleaning());
+                return false;
+            }
+        }
+    }
+
+    // The victim will be replaced by a new entry, so increase the replacement
+    // counter if a valid block is being replaced
+    if (replacement) {
+        stats.replacements++;
+
+        // Evict valid blocks associated to this victim block
+        for (auto& blk : evict_blks) {
+            if (blk->isValid()) {
+                evictBlock(blk, writebacks);
+            }
+        }
+    }
+
+    return true;
+}
+
 bool
 BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
-    uint32_t delay, Cycles tag_latency)
+                                 PacketList &writebacks)
 {
     // tempBlock does not exist in the tags, so don't do anything for it.
     if (blk == tempBlock) {
@@ -790,11 +844,11 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
 
     // The compressor is called to compress the updated data, so that its
     // metadata can be updated.
-    std::size_t compression_size = 0;
     Cycles compression_lat = Cycles(0);
     Cycles decompression_lat = Cycles(0);
-    compressor->compress(data, compression_lat, decompression_lat,
-                         compression_size);
+    const auto comp_data =
+        compressor->compress(data, compression_lat, decompression_lat);
+    std::size_t compression_size = comp_data->getSizeBits();
 
     // If block's compression factor increased, it may not be co-allocatable
     // anymore. If so, some blocks might need to be evicted to make room for
@@ -813,39 +867,22 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
     // allocated blocks to make room for the expansion, but other approaches
     // that take the replacement data of the superblock into account may
     // generate better results
-    std::vector<CacheBlk*> evict_blks;
     const bool was_compressed = compression_blk->isCompressed();
     if (was_compressed && !is_co_allocatable) {
-        // Get all co-allocated blocks
+        std::vector<CacheBlk*> evict_blks;
         for (const auto& sub_blk : superblock->blks) {
             if (sub_blk->isValid() && (compression_blk != sub_blk)) {
-                // Check for transient state allocations. If any of the
-                // entries listed for eviction has a transient state, the
-                // allocation fails
-                const Addr repl_addr = regenerateBlkAddr(sub_blk);
-                const MSHR *repl_mshr =
-                    mshrQueue.findMatch(repl_addr, sub_blk->isSecure());
-                if (repl_mshr) {
-                    DPRINTF(CacheRepl, "Aborting data expansion of %s due " \
-                            "to replacement of block in transient state: %s\n",
-                            compression_blk->print(), sub_blk->print());
-                    // Too hard to replace block with transient state, so it
-                    // cannot be evicted. Mark the update as failed and expect
-                    // the caller to evict this block. Since this is called
-                    // only when writebacks arrive, and packets do not contain
-                    // compressed data, there is no need to decompress
-                    compression_blk->setSizeBits(blkSize * 8);
-                    compression_blk->setDecompressionLatency(Cycles(0));
-                    compression_blk->setUncompressed();
-                    return false;
-                }
-
                 evict_blks.push_back(sub_blk);
             }
         }
 
+        // Try to evict blocks; if it fails, give up on update
+        if (!handleEvictions(evict_blks, writebacks)) {
+            return false;
+        }
+
         // Update the number of data expansions
-        dataExpansions++;
+        stats.dataExpansions++;
 
         DPRINTF(CacheComp, "Data expansion: expanding [%s] from %d to %d bits"
                 "\n", blk->print(), prev_size, compression_size);
@@ -860,17 +897,6 @@ BaseCache::updateCompressionData(CacheBlk *blk, const uint64_t* data,
     compression_blk->setSizeBits(compression_size);
     compression_blk->setDecompressionLatency(decompression_lat);
 
-    // Evict valid blocks
-    for (const auto& evict_blk : evict_blks) {
-        if (evict_blk->isValid()) {
-            if (evict_blk->wasPrefetched()) {
-                unusedPrefetches++;
-            }
-            Cycles lat = calculateAccessLatency(evict_blk, delay, tag_latency);
-            evictBlock(evict_blk, clockEdge(lat + forwardLatency));
-        }
-    }
-
     return true;
 }
 
@@ -884,7 +910,7 @@ BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool)
     // satisfying a string of Read and ReadEx requests from
     // upper-level caches, a Read will mark the block as shared but we
     // can satisfy a following ReadEx anyway since we can rely on the
-    // Read requester(s) to have buffered the ReadEx snoop and to
+    // Read requestor(s) to have buffered the ReadEx snoop and to
     // invalidate their blocks after receiving them.
     // assert(!pkt->needsWritable() || blk->isWritable());
     assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
@@ -1001,7 +1027,8 @@ BaseCache::calculateAccessLatency(const CacheBlk* blk, const uint32_t delay,
 }
 
 bool
-BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
+BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat,
+                  PacketList &writebacks)
 {
     // sanity check
     assert(pkt->isRequest());
@@ -1074,6 +1101,11 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
         }
     }
 
+    // The critical latency part of a write depends only on the tag access
+    if (pkt->isWrite()) {
+        lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
+    }
+
     // Writeback handling is special case.  We can write the block into
     // the cache without having a writeable copy (or any copy at all).
     if (pkt->isWriteback()) {
@@ -1093,22 +1125,15 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
             // and we just had to wait for the time to find a match in the
             // MSHR. As of now assume a mshr queue search takes as long as
             // a tag lookup for simplicity.
-            lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
-
             return true;
         }
 
         if (!blk) {
             // need to do a replacement
-            blk = allocateBlock(pkt, tag_latency);
+            blk = allocateBlock(pkt, writebacks);
             if (!blk) {
                 // no replaceable block available: give up, fwd to next level.
                 incMissCount(pkt);
-
-                // A writeback searches for the block, then writes the data.
-                // As the block could not be found, it was a tag-only access.
-                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
-
                 return false;
             }
 
@@ -1119,15 +1144,7 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
             // a smaller size, and now it doesn't fit the entry anymore).
             // If that is the case we might need to evict blocks.
             if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
-                pkt->headerDelay, tag_latency)) {
-                // This is a failed data expansion (write), which happened
-                // after finding the replacement entries and accessing the
-                // block's data. There were no replaceable entries available
-                // to make room for the expanded block, and since it does not
-                // fit anymore and it has been properly updated to contain
-                // the new data, forward it to the next level
-                lat = calculateAccessLatency(blk, pkt->headerDelay,
-                                             tag_latency);
+                writebacks)) {
                 invalidateBlock(blk);
                 return false;
             }
@@ -1151,9 +1168,6 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
         DPRINTF(Cache, "%s new state is %s\n", __func__, blk->print());
         incHitCount(pkt);
 
-        // A writeback searches for the block, then writes the data
-        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
-
         // When the packet metadata arrives, the tag lookup will be done while
         // the payload is arriving. Then the block will be ready to access as
         // soon as the fill is done
@@ -1186,27 +1200,16 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
 
         if (!blk) {
             if (pkt->writeThrough()) {
-                // A writeback searches for the block, then writes the data.
-                // As the block could not be found, it was a tag-only access.
-                lat = calculateTagOnlyLatency(pkt->headerDelay, tag_latency);
-
                 // if this is a write through packet, we don't try to
                 // allocate if the block is not present
                 return false;
             } else {
                 // a writeback that misses needs to allocate a new block
-                blk = allocateBlock(pkt, tag_latency);
+                blk = allocateBlock(pkt, writebacks);
                 if (!blk) {
                     // no replaceable block available: give up, fwd to
                     // next level.
                     incMissCount(pkt);
-
-                    // A writeback searches for the block, then writes the
-                    // data. As the block could not be found, it was a tag-only
-                    // access.
-                    lat = calculateTagOnlyLatency(pkt->headerDelay,
-                                                  tag_latency);
-
                     return false;
                 }
 
@@ -1218,15 +1221,7 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
             // a smaller size, and now it doesn't fit the entry anymore).
             // If that is the case we might need to evict blocks.
             if (!updateCompressionData(blk, pkt->getConstPtr<uint64_t>(),
-                pkt->headerDelay, tag_latency)) {
-                // This is a failed data expansion (write), which happened
-                // after finding the replacement entries and accessing the
-                // block's data. There were no replaceable entries available
-                // to make room for the expanded block, and since it does not
-                // fit anymore and it has been properly updated to contain
-                // the new data, forward it to the next level
-                lat = calculateAccessLatency(blk, pkt->headerDelay,
-                                             tag_latency);
+                writebacks)) {
                 invalidateBlock(blk);
                 return false;
             }
@@ -1247,9 +1242,6 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
 
         incHitCount(pkt);
 
-        // A writeback searches for the block, then writes the data
-        lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
-
         // When the packet metadata arrives, the tag lookup will be done while
         // the payload is arriving. Then the block will be ready to access as
         // soon as the fill is done
@@ -1264,12 +1256,12 @@ BaseCache::access(PacketPtr pkt, CacheBlk *&blk, Cycles &lat)
         incHitCount(pkt);
 
         // Calculate access latency based on the need to access the data array
-        if (pkt->isRead() || pkt->isWrite()) {
+        if (pkt->isRead()) {
             lat = calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
 
             // When a block is compressed, it must first be decompressed
             // before being read. This adds to the access latency.
-            if (compressor && pkt->isRead()) {
+            if (compressor) {
                 lat += compressor->getDecompressionLatency(blk);
             }
         } else {
@@ -1311,7 +1303,8 @@ BaseCache::maintainClusivity(bool from_cache, CacheBlk *blk)
 }
 
 CacheBlk*
-BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, bool allocate)
+BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, PacketList &writebacks,
+                      bool allocate)
 {
     assert(pkt->isResponse());
     Addr addr = pkt->getAddr();
@@ -1328,12 +1321,9 @@ BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, bool allocate)
         // better have read new data...
         assert(pkt->hasData() || pkt->cmd == MemCmd::InvalidateResp);
 
-        // Need to do a replacement if allocating, otherwise we stick
-        // with the temporary storage. The tag lookup has already been
-        // done to decide the eviction victims, so it is set to 0 here.
-        // The eviction itself, however, is delayed until the new data
-        // for the block that is requesting the replacement arrives.
-        blk = allocate ? allocateBlock(pkt, Cycles(0)) : nullptr;
+        // need to do a replacement if allocating, otherwise we stick
+        // with the temporary storage
+        blk = allocate ? allocateBlock(pkt, writebacks) : nullptr;
 
         if (!blk) {
             // No replaceable block or a mostly exclusive
@@ -1387,30 +1377,6 @@ BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, bool allocate)
             chatty_assert(!isReadOnly, "Should never see dirty snoop response "
                           "in read-only cache %s\n", name());
 
-        } else if (pkt->cmd.isSWPrefetch() && pkt->needsWritable()) {
-            // All other copies of the block were invalidated and we
-            // have an exclusive copy.
-
-            // The coherence protocol assumes that if we fetched an
-            // exclusive copy of the block, we have the intention to
-            // modify it. Therefore the MSHR for the PrefetchExReq has
-            // been the point of ordering and this cache has commited
-            // to respond to snoops for the block.
-            //
-            // In most cases this is true anyway - a PrefetchExReq
-            // will be followed by a WriteReq. However, if that
-            // doesn't happen, the block is not marked as dirty and
-            // the cache doesn't respond to snoops that has committed
-            // to do so.
-            //
-            // To avoid deadlocks in cases where there is a snoop
-            // between the PrefetchExReq and the expected WriteReq, we
-            // proactively mark the block as Dirty.
-
-            blk->status |= BlkDirty;
-
-            panic_if(!isReadOnly, "Prefetch exclusive requests from read-only "
-                     "cache %s\n", name());
         }
     }
 
@@ -1434,7 +1400,7 @@ BaseCache::handleFill(PacketPtr pkt, CacheBlk *blk, bool allocate)
 }
 
 CacheBlk*
-BaseCache::allocateBlock(const PacketPtr pkt, Cycles tag_latency)
+BaseCache::allocateBlock(const PacketPtr pkt, PacketList &writebacks)
 {
     // Get address
     const Addr addr = pkt->getAddr();
@@ -1454,9 +1420,10 @@ BaseCache::allocateBlock(const PacketPtr pkt, Cycles tag_latency)
     // compressor is used, the compression/decompression methods are called to
     // calculate the amount of extra cycles needed to read or write compressed
     // blocks.
-    if (compressor) {
-        compressor->compress(pkt->getConstPtr<uint64_t>(), compression_lat,
-                             decompression_lat, blk_size_bits);
+    if (compressor && pkt->hasData()) {
+        const auto comp_data = compressor->compress(
+            pkt->getConstPtr<uint64_t>(), compression_lat, decompression_lat);
+        blk_size_bits = comp_data->getSizeBits();
     }
 
     // Find replacement victim
@@ -1471,49 +1438,9 @@ BaseCache::allocateBlock(const PacketPtr pkt, Cycles tag_latency)
     // Print victim block's information
     DPRINTF(CacheRepl, "Replacement victim: %s\n", victim->print());
 
-    // Check for transient state allocations. If any of the entries listed
-    // for eviction has a transient state, the allocation fails
-    bool replacement = false;
-    for (const auto& blk : evict_blks) {
-        if (blk->isValid()) {
-            replacement = true;
-
-            Addr repl_addr = regenerateBlkAddr(blk);
-            MSHR *repl_mshr = mshrQueue.findMatch(repl_addr, blk->isSecure());
-            if (repl_mshr) {
-                // must be an outstanding upgrade or clean request
-                // on a block we're about to replace...
-                assert((!blk->isWritable() && repl_mshr->needsWritable()) ||
-                       repl_mshr->isCleaning());
-
-                // too hard to replace block with transient state
-                // allocation failed, block not inserted
-                return nullptr;
-            }
-        }
-    }
-
-    // The victim will be replaced by a new entry, so increase the replacement
-    // counter if a valid block is being replaced
-    if (replacement) {
-        // Evict valid blocks associated to this victim block
-        for (const auto& blk : evict_blks) {
-            if (blk->isValid()) {
-                DPRINTF(CacheRepl, "Evicting %s (%#llx) to make room for " \
-                        "%#llx (%s)\n", blk->print(), regenerateBlkAddr(blk),
-                        addr, is_secure);
-
-                if (blk->wasPrefetched()) {
-                    unusedPrefetches++;
-                }
-
-                Cycles lat =
-                    calculateAccessLatency(blk, pkt->headerDelay, tag_latency);
-                evictBlock(blk, clockEdge(lat + forwardLatency));
-            }
-        }
-
-        replacements++;
+    // Try to evict blocks; if it fails, give up on allocation
+    if (!handleEvictions(evict_blks, writebacks)) {
+        return nullptr;
     }
 
     // If using a compressor, set compression data. This must be done before
@@ -1532,6 +1459,11 @@ BaseCache::allocateBlock(const PacketPtr pkt, Cycles tag_latency)
 void
 BaseCache::invalidateBlock(CacheBlk *blk)
 {
+    // If block is still marked as prefetched, then it hasn't been used
+    if (blk->wasPrefetched()) {
+        stats.unusedPrefetches++;
+    }
+
     // If handling a block present in the Tags, let it do its invalidation
     // process, which will update stats and invalidate the block itself
     if (blk != tempBlock) {
@@ -1542,15 +1474,11 @@ BaseCache::invalidateBlock(CacheBlk *blk)
 }
 
 void
-BaseCache::evictBlock(CacheBlk *blk, Tick forward_timing)
+BaseCache::evictBlock(CacheBlk *blk, PacketList &writebacks)
 {
     PacketPtr pkt = evictBlock(blk);
     if (pkt) {
-        if (system->isTimingMode()) {
-            doWritebacks(pkt, forward_timing);
-        } else {
-            doWritebacksAtomic(pkt);
-        }
+        writebacks.push_back(pkt);
     }
 }
 
@@ -1561,10 +1489,10 @@ BaseCache::writebackBlk(CacheBlk *blk)
                   "Writeback from read-only cache");
     assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
 
-    writebacks[Request::wbMasterId]++;
+    stats.writebacks[Request::wbRequestorId]++;
 
     RequestPtr req = std::make_shared<Request>(
-        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+        regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
 
     if (blk->isSecure())
         req->setFlags(Request::SECURE);
@@ -1606,7 +1534,7 @@ PacketPtr
 BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
 {
     RequestPtr req = std::make_shared<Request>(
-        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+        regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
 
     if (blk->isSecure()) {
         req->setFlags(Request::SECURE);
@@ -1679,7 +1607,7 @@ BaseCache::writebackVisitor(CacheBlk &blk)
         assert(blk.isValid());
 
         RequestPtr request = std::make_shared<Request>(
-            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
+            regenerateBlkAddr(&blk), blkSize, 0, Request::funcRequestorId);
 
         request->taskId(blk.task_id);
         if (blk.isSecure()) {
@@ -1716,7 +1644,7 @@ BaseCache::nextQueueReadyTime() const
 
     // Don't signal prefetch ready time if no MSHRs available
     // Will signal once enough MSHRs are deallocated
-    if (prefetcher && mshrQueue.canPrefetch()) {
+    if (prefetcher && mshrQueue.canPrefetch() && !isBlocked()) {
         nextReady = std::min(nextReady,
                              prefetcher->nextPrefetchReadyTime());
     }
@@ -1819,7 +1747,9 @@ BaseCache::sendMSHRQueuePacket(MSHR* mshr)
                     __func__, pkt->print(), blk->print());
             PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(),
                                              pkt->id);
-            doWritebacks(wb_pkt, 0);
+            PacketList writebacks;
+            writebacks.push_back(wb_pkt);
+            doWritebacks(writebacks, 0);
         }
 
         return false;
@@ -1882,248 +1812,350 @@ BaseCache::unserialize(CheckpointIn &cp)
     }
 }
 
-void
-BaseCache::regStats()
+
+BaseCache::CacheCmdStats::CacheCmdStats(BaseCache &c,
+                                        const std::string &name)
+    : Stats::Group(&c), cache(c),
+
+    hits(
+        this, (name + "_hits").c_str(),
+        ("number of " + name + " hits").c_str()),
+    misses(
+        this, (name + "_misses").c_str(),
+        ("number of " + name + " misses").c_str()),
+    missLatency(
+        this, (name + "_miss_latency").c_str(),
+        ("number of " + name + " miss cycles").c_str()),
+    accesses(
+        this, (name + "_accesses").c_str(),
+        ("number of " + name + " accesses(hits+misses)").c_str()),
+    missRate(
+        this, (name + "_miss_rate").c_str(),
+        ("miss rate for " + name + " accesses").c_str()),
+    avgMissLatency(
+        this, (name + "_avg_miss_latency").c_str(),
+        ("average " + name + " miss latency").c_str()),
+    mshr_hits(
+        this, (name + "_mshr_hits").c_str(),
+        ("number of " + name + " MSHR hits").c_str()),
+    mshr_misses(
+        this, (name + "_mshr_misses").c_str(),
+        ("number of " + name + " MSHR misses").c_str()),
+    mshr_uncacheable(
+        this, (name + "_mshr_uncacheable").c_str(),
+        ("number of " + name + " MSHR uncacheable").c_str()),
+    mshr_miss_latency(
+        this, (name + "_mshr_miss_latency").c_str(),
+        ("number of " + name + " MSHR miss cycles").c_str()),
+    mshr_uncacheable_lat(
+        this, (name + "_mshr_uncacheable_latency").c_str(),
+        ("number of " + name + " MSHR uncacheable cycles").c_str()),
+    mshrMissRate(
+        this, (name + "_mshr_miss_rate").c_str(),
+        ("mshr miss rate for " + name + " accesses").c_str()),
+    avgMshrMissLatency(
+        this, (name + "_avg_mshr_miss_latency").c_str(),
+        ("average " + name + " mshr miss latency").c_str()),
+    avgMshrUncacheableLatency(
+        this, (name + "_avg_mshr_uncacheable_latency").c_str(),
+        ("average " + name + " mshr uncacheable latency").c_str())
 {
-    ClockedObject::regStats();
+}
 
+void
+BaseCache::CacheCmdStats::regStatsFromParent()
+{
     using namespace Stats;
 
-    // Hit statistics
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        hits[access_idx]
-            .init(system->maxMasters())
-            .name(name() + "." + cstr + "_hits")
-            .desc("number of " + cstr + " hits")
-            .flags(total | nozero | nonan)
-            ;
-        for (int i = 0; i < system->maxMasters(); i++) {
-            hits[access_idx].subname(i, system->getMasterName(i));
-        }
-    }
-
-// These macros make it easier to sum the right subset of commands and
-// to change the subset of commands that are considered "demand" vs
-// "non-demand"
-#define SUM_DEMAND(s) \
-    (s[MemCmd::ReadReq] + s[MemCmd::WriteReq] + s[MemCmd::WriteLineReq] + \
-     s[MemCmd::ReadExReq] + s[MemCmd::ReadCleanReq] + s[MemCmd::ReadSharedReq])
+    Stats::Group::regStats();
+    System *system = cache.system;
+    const auto max_requestors = system->maxRequestors();
 
-// should writebacks be included here?  prior code was inconsistent...
-#define SUM_NON_DEMAND(s) \
-    (s[MemCmd::SoftPFReq] + s[MemCmd::HardPFReq] + s[MemCmd::SoftPFExReq])
+    hits
+        .init(max_requestors)
+        .flags(total | nozero | nonan)
+        ;
+    for (int i = 0; i < max_requestors; i++) {
+        hits.subname(i, system->getRequestorName(i));
+    }
 
-    demandHits
-        .name(name() + ".demand_hits")
-        .desc("number of demand (read+write) hits")
+    // Miss statistics
+    misses
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    demandHits = SUM_DEMAND(hits);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandHits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        misses.subname(i, system->getRequestorName(i));
     }
 
-    overallHits
-        .name(name() + ".overall_hits")
-        .desc("number of overall hits")
+    // Miss latency statistics
+    missLatency
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    overallHits = demandHits + SUM_NON_DEMAND(hits);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallHits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        missLatency.subname(i, system->getRequestorName(i));
     }
 
-    // Miss statistics
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        misses[access_idx]
-            .init(system->maxMasters())
-            .name(name() + "." + cstr + "_misses")
-            .desc("number of " + cstr + " misses")
-            .flags(total | nozero | nonan)
-            ;
-        for (int i = 0; i < system->maxMasters(); i++) {
-            misses[access_idx].subname(i, system->getMasterName(i));
-        }
+    // access formulas
+    accesses.flags(total | nozero | nonan);
+    accesses = hits + misses;
+    for (int i = 0; i < max_requestors; i++) {
+        accesses.subname(i, system->getRequestorName(i));
     }
 
-    demandMisses
-        .name(name() + ".demand_misses")
-        .desc("number of demand (read+write) misses")
+    // miss rate formulas
+    missRate.flags(total | nozero | nonan);
+    missRate = misses / accesses;
+    for (int i = 0; i < max_requestors; i++) {
+        missRate.subname(i, system->getRequestorName(i));
+    }
+
+    // miss latency formulas
+    avgMissLatency.flags(total | nozero | nonan);
+    avgMissLatency = missLatency / misses;
+    for (int i = 0; i < max_requestors; i++) {
+        avgMissLatency.subname(i, system->getRequestorName(i));
+    }
+
+    // MSHR statistics
+    // MSHR hit statistics
+    mshr_hits
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    demandMisses = SUM_DEMAND(misses);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandMisses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_hits.subname(i, system->getRequestorName(i));
     }
 
-    overallMisses
-        .name(name() + ".overall_misses")
-        .desc("number of overall misses")
+    // MSHR miss statistics
+    mshr_misses
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMisses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_misses.subname(i, system->getRequestorName(i));
     }
 
-    // Miss latency statistics
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        missLatency[access_idx]
-            .init(system->maxMasters())
-            .name(name() + "." + cstr + "_miss_latency")
-            .desc("number of " + cstr + " miss cycles")
-            .flags(total | nozero | nonan)
-            ;
-        for (int i = 0; i < system->maxMasters(); i++) {
-            missLatency[access_idx].subname(i, system->getMasterName(i));
-        }
+    // MSHR miss latency statistics
+    mshr_miss_latency
+        .init(max_requestors)
+        .flags(total | nozero | nonan)
+        ;
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_miss_latency.subname(i, system->getRequestorName(i));
     }
 
-    demandMissLatency
-        .name(name() + ".demand_miss_latency")
-        .desc("number of demand (read+write) miss cycles")
+    // MSHR uncacheable statistics
+    mshr_uncacheable
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    demandMissLatency = SUM_DEMAND(missLatency);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_uncacheable.subname(i, system->getRequestorName(i));
     }
 
-    overallMissLatency
-        .name(name() + ".overall_miss_latency")
-        .desc("number of overall miss cycles")
+    // MSHR uncacheable latency statistics
+    mshr_uncacheable_lat
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_uncacheable_lat.subname(i, system->getRequestorName(i));
     }
 
-    // access formulas
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        accesses[access_idx]
-            .name(name() + "." + cstr + "_accesses")
-            .desc("number of " + cstr + " accesses(hits+misses)")
-            .flags(total | nozero | nonan)
-            ;
-        accesses[access_idx] = hits[access_idx] + misses[access_idx];
-
-        for (int i = 0; i < system->maxMasters(); i++) {
-            accesses[access_idx].subname(i, system->getMasterName(i));
-        }
+    // MSHR miss rate formulas
+    mshrMissRate.flags(total | nozero | nonan);
+    mshrMissRate = mshr_misses / accesses;
+
+    for (int i = 0; i < max_requestors; i++) {
+        mshrMissRate.subname(i, system->getRequestorName(i));
     }
 
-    demandAccesses
-        .name(name() + ".demand_accesses")
-        .desc("number of demand (read+write) accesses")
-        .flags(total | nozero | nonan)
-        ;
-    demandAccesses = demandHits + demandMisses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandAccesses.subname(i, system->getMasterName(i));
+    // mshrMiss latency formulas
+    avgMshrMissLatency.flags(total | nozero | nonan);
+    avgMshrMissLatency = mshr_miss_latency / mshr_misses;
+    for (int i = 0; i < max_requestors; i++) {
+        avgMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
-    overallAccesses
-        .name(name() + ".overall_accesses")
-        .desc("number of overall (read+write) accesses")
-        .flags(total | nozero | nonan)
-        ;
-    overallAccesses = overallHits + overallMisses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallAccesses.subname(i, system->getMasterName(i));
+    // mshrUncacheable latency formulas
+    avgMshrUncacheableLatency.flags(total | nozero | nonan);
+    avgMshrUncacheableLatency = mshr_uncacheable_lat / mshr_uncacheable;
+    for (int i = 0; i < max_requestors; i++) {
+        avgMshrUncacheableLatency.subname(i, system->getRequestorName(i));
     }
+}
 
-    // miss rate formulas
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        missRate[access_idx]
-            .name(name() + "." + cstr + "_miss_rate")
-            .desc("miss rate for " + cstr + " accesses")
-            .flags(total | nozero | nonan)
-            ;
-        missRate[access_idx] = misses[access_idx] / accesses[access_idx];
-
-        for (int i = 0; i < system->maxMasters(); i++) {
-            missRate[access_idx].subname(i, system->getMasterName(i));
-        }
+BaseCache::CacheStats::CacheStats(BaseCache &c)
+    : Stats::Group(&c), cache(c),
+
+    demandHits(this, "demand_hits", "number of demand (read+write) hits"),
+
+    overallHits(this, "overall_hits", "number of overall hits"),
+    demandMisses(this, "demand_misses",
+                 "number of demand (read+write) misses"),
+    overallMisses(this, "overall_misses", "number of overall misses"),
+    demandMissLatency(this, "demand_miss_latency",
+                      "number of demand (read+write) miss cycles"),
+    overallMissLatency(this, "overall_miss_latency",
+                       "number of overall miss cycles"),
+    demandAccesses(this, "demand_accesses",
+                   "number of demand (read+write) accesses"),
+    overallAccesses(this, "overall_accesses",
+                    "number of overall (read+write) accesses"),
+    demandMissRate(this, "demand_miss_rate",
+                   "miss rate for demand accesses"),
+    overallMissRate(this, "overall_miss_rate",
+                    "miss rate for overall accesses"),
+    demandAvgMissLatency(this, "demand_avg_miss_latency",
+                         "average overall miss latency"),
+    overallAvgMissLatency(this, "overall_avg_miss_latency",
+                          "average overall miss latency"),
+    blocked_cycles(this, "blocked_cycles",
+                   "number of cycles access was blocked"),
+    blocked_causes(this, "blocked", "number of cycles access was blocked"),
+    avg_blocked(this, "avg_blocked_cycles",
+                "average number of cycles each access was blocked"),
+    unusedPrefetches(this, "unused_prefetches",
+                     "number of HardPF blocks evicted w/o reference"),
+    writebacks(this, "writebacks", "number of writebacks"),
+    demandMshrHits(this, "demand_mshr_hits",
+                   "number of demand (read+write) MSHR hits"),
+    overallMshrHits(this, "overall_mshr_hits",
+                    "number of overall MSHR hits"),
+    demandMshrMisses(this, "demand_mshr_misses",
+                     "number of demand (read+write) MSHR misses"),
+    overallMshrMisses(this, "overall_mshr_misses",
+                      "number of overall MSHR misses"),
+    overallMshrUncacheable(this, "overall_mshr_uncacheable_misses",
+                           "number of overall MSHR uncacheable misses"),
+    demandMshrMissLatency(this, "demand_mshr_miss_latency",
+                          "number of demand (read+write) MSHR miss cycles"),
+    overallMshrMissLatency(this, "overall_mshr_miss_latency",
+                           "number of overall MSHR miss cycles"),
+    overallMshrUncacheableLatency(this, "overall_mshr_uncacheable_latency",
+                                  "number of overall MSHR uncacheable cycles"),
+    demandMshrMissRate(this, "demand_mshr_miss_rate",
+                       "mshr miss rate for demand accesses"),
+    overallMshrMissRate(this, "overall_mshr_miss_rate",
+                        "mshr miss rate for overall accesses"),
+    demandAvgMshrMissLatency(this, "demand_avg_mshr_miss_latency",
+                             "average overall mshr miss latency"),
+    overallAvgMshrMissLatency(this, "overall_avg_mshr_miss_latency",
+                              "average overall mshr miss latency"),
+    overallAvgMshrUncacheableLatency(
+        this, "overall_avg_mshr_uncacheable_latency",
+        "average overall mshr uncacheable latency"),
+    replacements(this, "replacements", "number of replacements"),
+
+    dataExpansions(this, "data_expansions", "number of data expansions"),
+    cmd(MemCmd::NUM_MEM_CMDS)
+{
+    for (int idx = 0; idx < MemCmd::NUM_MEM_CMDS; ++idx)
+        cmd[idx].reset(new CacheCmdStats(c, MemCmd(idx).toString()));
+}
+
+void
+BaseCache::CacheStats::regStats()
+{
+    using namespace Stats;
+
+    Stats::Group::regStats();
+
+    System *system = cache.system;
+    const auto max_requestors = system->maxRequestors();
+
+    for (auto &cs : cmd)
+        cs->regStatsFromParent();
+
+// These macros make it easier to sum the right subset of commands and
+// to change the subset of commands that are considered "demand" vs
+// "non-demand"
+#define SUM_DEMAND(s)                                                   \
+    (cmd[MemCmd::ReadReq]->s + cmd[MemCmd::WriteReq]->s +               \
+     cmd[MemCmd::WriteLineReq]->s + cmd[MemCmd::ReadExReq]->s +         \
+     cmd[MemCmd::ReadCleanReq]->s + cmd[MemCmd::ReadSharedReq]->s)
+
+// should writebacks be included here?  prior code was inconsistent...
+#define SUM_NON_DEMAND(s)                                       \
+    (cmd[MemCmd::SoftPFReq]->s + cmd[MemCmd::HardPFReq]->s +    \
+     cmd[MemCmd::SoftPFExReq]->s)
+
+    demandHits.flags(total | nozero | nonan);
+    demandHits = SUM_DEMAND(hits);
+    for (int i = 0; i < max_requestors; i++) {
+        demandHits.subname(i, system->getRequestorName(i));
     }
 
-    demandMissRate
-        .name(name() + ".demand_miss_rate")
-        .desc("miss rate for demand accesses")
-        .flags(total | nozero | nonan)
-        ;
-    demandMissRate = demandMisses / demandAccesses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandMissRate.subname(i, system->getMasterName(i));
+    overallHits.flags(total | nozero | nonan);
+    overallHits = demandHits + SUM_NON_DEMAND(hits);
+    for (int i = 0; i < max_requestors; i++) {
+        overallHits.subname(i, system->getRequestorName(i));
     }
 
-    overallMissRate
-        .name(name() + ".overall_miss_rate")
-        .desc("miss rate for overall accesses")
-        .flags(total | nozero | nonan)
-        ;
-    overallMissRate = overallMisses / overallAccesses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMissRate.subname(i, system->getMasterName(i));
+    demandMisses.flags(total | nozero | nonan);
+    demandMisses = SUM_DEMAND(misses);
+    for (int i = 0; i < max_requestors; i++) {
+        demandMisses.subname(i, system->getRequestorName(i));
     }
 
-    // miss latency formulas
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        avgMissLatency[access_idx]
-            .name(name() + "." + cstr + "_avg_miss_latency")
-            .desc("average " + cstr + " miss latency")
-            .flags(total | nozero | nonan)
-            ;
-        avgMissLatency[access_idx] =
-            missLatency[access_idx] / misses[access_idx];
-
-        for (int i = 0; i < system->maxMasters(); i++) {
-            avgMissLatency[access_idx].subname(i, system->getMasterName(i));
-        }
+    overallMisses.flags(total | nozero | nonan);
+    overallMisses = demandMisses + SUM_NON_DEMAND(misses);
+    for (int i = 0; i < max_requestors; i++) {
+        overallMisses.subname(i, system->getRequestorName(i));
     }
 
-    demandAvgMissLatency
-        .name(name() + ".demand_avg_miss_latency")
-        .desc("average overall miss latency")
-        .flags(total | nozero | nonan)
-        ;
+    demandMissLatency.flags(total | nozero | nonan);
+    demandMissLatency = SUM_DEMAND(missLatency);
+    for (int i = 0; i < max_requestors; i++) {
+        demandMissLatency.subname(i, system->getRequestorName(i));
+    }
+
+    overallMissLatency.flags(total | nozero | nonan);
+    overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
+    for (int i = 0; i < max_requestors; i++) {
+        overallMissLatency.subname(i, system->getRequestorName(i));
+    }
+
+    demandAccesses.flags(total | nozero | nonan);
+    demandAccesses = demandHits + demandMisses;
+    for (int i = 0; i < max_requestors; i++) {
+        demandAccesses.subname(i, system->getRequestorName(i));
+    }
+
+    overallAccesses.flags(total | nozero | nonan);
+    overallAccesses = overallHits + overallMisses;
+    for (int i = 0; i < max_requestors; i++) {
+        overallAccesses.subname(i, system->getRequestorName(i));
+    }
+
+    demandMissRate.flags(total | nozero | nonan);
+    demandMissRate = demandMisses / demandAccesses;
+    for (int i = 0; i < max_requestors; i++) {
+        demandMissRate.subname(i, system->getRequestorName(i));
+    }
+
+    overallMissRate.flags(total | nozero | nonan);
+    overallMissRate = overallMisses / overallAccesses;
+    for (int i = 0; i < max_requestors; i++) {
+        overallMissRate.subname(i, system->getRequestorName(i));
+    }
+
+    demandAvgMissLatency.flags(total | nozero | nonan);
     demandAvgMissLatency = demandMissLatency / demandMisses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandAvgMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandAvgMissLatency.subname(i, system->getRequestorName(i));
     }
 
-    overallAvgMissLatency
-        .name(name() + ".overall_avg_miss_latency")
-        .desc("average overall miss latency")
-        .flags(total | nozero | nonan)
-        ;
+    overallAvgMissLatency.flags(total | nozero | nonan);
     overallAvgMissLatency = overallMissLatency / overallMisses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallAvgMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallAvgMissLatency.subname(i, system->getRequestorName(i));
     }
 
     blocked_cycles.init(NUM_BLOCKED_CAUSES);
     blocked_cycles
-        .name(name() + ".blocked_cycles")
-        .desc("number of cycles access was blocked")
         .subname(Blocked_NoMSHRs, "no_mshrs")
         .subname(Blocked_NoTargets, "no_targets")
         ;
@@ -2131,352 +2163,111 @@ BaseCache::regStats()
 
     blocked_causes.init(NUM_BLOCKED_CAUSES);
     blocked_causes
-        .name(name() + ".blocked")
-        .desc("number of cycles access was blocked")
         .subname(Blocked_NoMSHRs, "no_mshrs")
         .subname(Blocked_NoTargets, "no_targets")
         ;
 
     avg_blocked
-        .name(name() + ".avg_blocked_cycles")
-        .desc("average number of cycles each access was blocked")
         .subname(Blocked_NoMSHRs, "no_mshrs")
         .subname(Blocked_NoTargets, "no_targets")
         ;
-
     avg_blocked = blocked_cycles / blocked_causes;
 
-    unusedPrefetches
-        .name(name() + ".unused_prefetches")
-        .desc("number of HardPF blocks evicted w/o reference")
-        .flags(nozero)
-        ;
+    unusedPrefetches.flags(nozero);
 
     writebacks
-        .init(system->maxMasters())
-        .name(name() + ".writebacks")
-        .desc("number of writebacks")
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        writebacks.subname(i, system->getMasterName(i));
-    }
-
-    // MSHR statistics
-    // MSHR hit statistics
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        mshr_hits[access_idx]
-            .init(system->maxMasters())
-            .name(name() + "." + cstr + "_mshr_hits")
-            .desc("number of " + cstr + " MSHR hits")
-            .flags(total | nozero | nonan)
-            ;
-        for (int i = 0; i < system->maxMasters(); i++) {
-            mshr_hits[access_idx].subname(i, system->getMasterName(i));
-        }
+    for (int i = 0; i < max_requestors; i++) {
+        writebacks.subname(i, system->getRequestorName(i));
     }
 
-    demandMshrHits
-        .name(name() + ".demand_mshr_hits")
-        .desc("number of demand (read+write) MSHR hits")
-        .flags(total | nozero | nonan)
-        ;
+    demandMshrHits.flags(total | nozero | nonan);
     demandMshrHits = SUM_DEMAND(mshr_hits);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandMshrHits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMshrHits.subname(i, system->getRequestorName(i));
     }
 
-    overallMshrHits
-        .name(name() + ".overall_mshr_hits")
-        .desc("number of overall MSHR hits")
-        .flags(total | nozero | nonan)
-        ;
+    overallMshrHits.flags(total | nozero | nonan);
     overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMshrHits.subname(i, system->getMasterName(i));
-    }
-
-    // MSHR miss statistics
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        mshr_misses[access_idx]
-            .init(system->maxMasters())
-            .name(name() + "." + cstr + "_mshr_misses")
-            .desc("number of " + cstr + " MSHR misses")
-            .flags(total | nozero | nonan)
-            ;
-        for (int i = 0; i < system->maxMasters(); i++) {
-            mshr_misses[access_idx].subname(i, system->getMasterName(i));
-        }
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrHits.subname(i, system->getRequestorName(i));
     }
 
-    demandMshrMisses
-        .name(name() + ".demand_mshr_misses")
-        .desc("number of demand (read+write) MSHR misses")
-        .flags(total | nozero | nonan)
-        ;
+    demandMshrMisses.flags(total | nozero | nonan);
     demandMshrMisses = SUM_DEMAND(mshr_misses);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandMshrMisses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMshrMisses.subname(i, system->getRequestorName(i));
     }
 
-    overallMshrMisses
-        .name(name() + ".overall_mshr_misses")
-        .desc("number of overall MSHR misses")
-        .flags(total | nozero | nonan)
-        ;
+    overallMshrMisses.flags(total | nozero | nonan);
     overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMshrMisses.subname(i, system->getMasterName(i));
-    }
-
-    // MSHR miss latency statistics
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        mshr_miss_latency[access_idx]
-            .init(system->maxMasters())
-            .name(name() + "." + cstr + "_mshr_miss_latency")
-            .desc("number of " + cstr + " MSHR miss cycles")
-            .flags(total | nozero | nonan)
-            ;
-        for (int i = 0; i < system->maxMasters(); i++) {
-            mshr_miss_latency[access_idx].subname(i, system->getMasterName(i));
-        }
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrMisses.subname(i, system->getRequestorName(i));
     }
 
-    demandMshrMissLatency
-        .name(name() + ".demand_mshr_miss_latency")
-        .desc("number of demand (read+write) MSHR miss cycles")
-        .flags(total | nozero | nonan)
-        ;
+    demandMshrMissLatency.flags(total | nozero | nonan);
     demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandMshrMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
-    overallMshrMissLatency
-        .name(name() + ".overall_mshr_miss_latency")
-        .desc("number of overall MSHR miss cycles")
-        .flags(total | nozero | nonan)
-        ;
+    overallMshrMissLatency.flags(total | nozero | nonan);
     overallMshrMissLatency =
         demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMshrMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
-    // MSHR uncacheable statistics
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        mshr_uncacheable[access_idx]
-            .init(system->maxMasters())
-            .name(name() + "." + cstr + "_mshr_uncacheable")
-            .desc("number of " + cstr + " MSHR uncacheable")
-            .flags(total | nozero | nonan)
-            ;
-        for (int i = 0; i < system->maxMasters(); i++) {
-            mshr_uncacheable[access_idx].subname(i, system->getMasterName(i));
-        }
-    }
-
-    overallMshrUncacheable
-        .name(name() + ".overall_mshr_uncacheable_misses")
-        .desc("number of overall MSHR uncacheable misses")
-        .flags(total | nozero | nonan)
-        ;
+    overallMshrUncacheable.flags(total | nozero | nonan);
     overallMshrUncacheable =
         SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMshrUncacheable.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrUncacheable.subname(i, system->getRequestorName(i));
     }
 
-    // MSHR miss latency statistics
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        mshr_uncacheable_lat[access_idx]
-            .init(system->maxMasters())
-            .name(name() + "." + cstr + "_mshr_uncacheable_latency")
-            .desc("number of " + cstr + " MSHR uncacheable cycles")
-            .flags(total | nozero | nonan)
-            ;
-        for (int i = 0; i < system->maxMasters(); i++) {
-            mshr_uncacheable_lat[access_idx].subname(
-                i, system->getMasterName(i));
-        }
-    }
-
-    overallMshrUncacheableLatency
-        .name(name() + ".overall_mshr_uncacheable_latency")
-        .desc("number of overall MSHR uncacheable cycles")
-        .flags(total | nozero | nonan)
-        ;
+    overallMshrUncacheableLatency.flags(total | nozero | nonan);
     overallMshrUncacheableLatency =
         SUM_DEMAND(mshr_uncacheable_lat) +
         SUM_NON_DEMAND(mshr_uncacheable_lat);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrUncacheableLatency.subname(i, system->getRequestorName(i));
     }
 
-#if 0
-    // MSHR access formulas
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        mshrAccesses[access_idx]
-            .name(name() + "." + cstr + "_mshr_accesses")
-            .desc("number of " + cstr + " mshr accesses(hits+misses)")
-            .flags(total | nozero | nonan)
-            ;
-        mshrAccesses[access_idx] =
-            mshr_hits[access_idx] + mshr_misses[access_idx]
-            + mshr_uncacheable[access_idx];
-    }
-
-    demandMshrAccesses
-        .name(name() + ".demand_mshr_accesses")
-        .desc("number of demand (read+write) mshr accesses")
-        .flags(total | nozero | nonan)
-        ;
-    demandMshrAccesses = demandMshrHits + demandMshrMisses;
-
-    overallMshrAccesses
-        .name(name() + ".overall_mshr_accesses")
-        .desc("number of overall (read+write) mshr accesses")
-        .flags(total | nozero | nonan)
-        ;
-    overallMshrAccesses = overallMshrHits + overallMshrMisses
-        + overallMshrUncacheable;
-#endif
-
-    // MSHR miss rate formulas
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        mshrMissRate[access_idx]
-            .name(name() + "." + cstr + "_mshr_miss_rate")
-            .desc("mshr miss rate for " + cstr + " accesses")
-            .flags(total | nozero | nonan)
-            ;
-        mshrMissRate[access_idx] =
-            mshr_misses[access_idx] / accesses[access_idx];
-
-        for (int i = 0; i < system->maxMasters(); i++) {
-            mshrMissRate[access_idx].subname(i, system->getMasterName(i));
-        }
-    }
-
-    demandMshrMissRate
-        .name(name() + ".demand_mshr_miss_rate")
-        .desc("mshr miss rate for demand accesses")
-        .flags(total | nozero | nonan)
-        ;
+    demandMshrMissRate.flags(total | nozero | nonan);
     demandMshrMissRate = demandMshrMisses / demandAccesses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandMshrMissRate.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMshrMissRate.subname(i, system->getRequestorName(i));
     }
 
-    overallMshrMissRate
-        .name(name() + ".overall_mshr_miss_rate")
-        .desc("mshr miss rate for overall accesses")
-        .flags(total | nozero | nonan)
-        ;
+    overallMshrMissRate.flags(total | nozero | nonan);
     overallMshrMissRate = overallMshrMisses / overallAccesses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallMshrMissRate.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrMissRate.subname(i, system->getRequestorName(i));
     }
 
-    // mshrMiss latency formulas
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        avgMshrMissLatency[access_idx]
-            .name(name() + "." + cstr + "_avg_mshr_miss_latency")
-            .desc("average " + cstr + " mshr miss latency")
-            .flags(total | nozero | nonan)
-            ;
-        avgMshrMissLatency[access_idx] =
-            mshr_miss_latency[access_idx] / mshr_misses[access_idx];
-
-        for (int i = 0; i < system->maxMasters(); i++) {
-            avgMshrMissLatency[access_idx].subname(
-                i, system->getMasterName(i));
-        }
-    }
-
-    demandAvgMshrMissLatency
-        .name(name() + ".demand_avg_mshr_miss_latency")
-        .desc("average overall mshr miss latency")
-        .flags(total | nozero | nonan)
-        ;
+    demandAvgMshrMissLatency.flags(total | nozero | nonan);
     demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandAvgMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
-    overallAvgMshrMissLatency
-        .name(name() + ".overall_avg_mshr_miss_latency")
-        .desc("average overall mshr miss latency")
-        .flags(total | nozero | nonan)
-        ;
+    overallAvgMshrMissLatency.flags(total | nozero | nonan);
     overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
-    }
-
-    // mshrUncacheable latency formulas
-    for (int access_idx = 0; access_idx < MemCmd::NUM_MEM_CMDS; ++access_idx) {
-        MemCmd cmd(access_idx);
-        const string &cstr = cmd.toString();
-
-        avgMshrUncacheableLatency[access_idx]
-            .name(name() + "." + cstr + "_avg_mshr_uncacheable_latency")
-            .desc("average " + cstr + " mshr uncacheable latency")
-            .flags(total | nozero | nonan)
-            ;
-        avgMshrUncacheableLatency[access_idx] =
-            mshr_uncacheable_lat[access_idx] / mshr_uncacheable[access_idx];
-
-        for (int i = 0; i < system->maxMasters(); i++) {
-            avgMshrUncacheableLatency[access_idx].subname(
-                i, system->getMasterName(i));
-        }
+    for (int i = 0; i < max_requestors; i++) {
+        overallAvgMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
-    overallAvgMshrUncacheableLatency
-        .name(name() + ".overall_avg_mshr_uncacheable_latency")
-        .desc("average overall mshr uncacheable latency")
-        .flags(total | nozero | nonan)
-        ;
+    overallAvgMshrUncacheableLatency.flags(total | nozero | nonan);
     overallAvgMshrUncacheableLatency =
         overallMshrUncacheableLatency / overallMshrUncacheable;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallAvgMshrUncacheableLatency.subname(i,
+            system->getRequestorName(i));
     }
 
-    replacements
-        .name(name() + ".replacements")
-        .desc("number of replacements")
-        ;
-
-    dataExpansions
-        .name(name() + ".data_expansions")
-        .desc("number of data expansions")
-        .flags(nozero | nonan)
-        ;
+    dataExpansions.flags(nozero | nonan);
 }
 
 void
@@ -2500,7 +2291,7 @@ BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt)
 
     assert(pkt->isResponse());
 
-    // Express snoop responses from master to slave, e.g., from L1 to L2
+    // Express snoop responses from requestor to responder, e.g., from L1 to L2
     cache->recvTimingSnoopResp(pkt);
     return true;
 }
@@ -2574,7 +2365,7 @@ BaseCache::CpuSidePort::getAddrRanges() const
 BaseCache::
 CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                          const std::string &_label)
-    : CacheSlavePort(_name, _cache, _label), cache(_cache)
+    : CacheResponsePort(_name, _cache, _label), cache(_cache)
 {
 }
 
@@ -2661,7 +2452,7 @@ BaseCache::CacheReqPacketQueue::sendDeferredPacket()
 BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                     BaseCache *_cache,
                                     const std::string &_label)
-    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
+    : CacheRequestPort(_name, _cache, _reqQueue, _snoopRespQueue),
       _reqQueue(*_cache, *this, _snoopRespQueue, _label),
       _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
 {