mem: Fix/Improve stats in src/mem
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 3bb2667af7d9e84bb110734287e83a70819c6d4d..4bec7de38e5c9a6bb93faee6ba57229fc02c7ea2 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2018 ARM Limited
+ * Copyright (c) 2010-2019 ARM Limited
  * All rights reserved.
  *
  * The license below extends only to copyright in the software and shall
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Authors: Erik Hallnor
- *          Dave Greene
- *          Nathan Binkert
- *          Steve Reinhardt
- *          Ron Dreslinski
- *          Andreas Sandberg
- *          Nikos Nikoleris
  */
 
 /**
 #include "mem/request.hh"
 #include "params/Cache.hh"
 
-Cache::Cache(const CacheParams *p)
-    : BaseCache(p, p->system->cacheLineSize()),
+Cache::Cache(const CacheParams &p)
+    : BaseCache(p, p.system->cacheLineSize()),
       doFastWrites(true)
 {
+    assert(p.tags);
+    assert(p.replacement_policy);
 }
 
 void
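
The constructor now takes the params struct by const reference, and the
hand-written CacheParams::create() at the bottom of this diff is removed:
construction moves into the generated params code. A sketch of what that
generated factory does (simplified; the real code is emitted by gem5's
build system):

    // Auto-generated (sketch): the params object constructs the
    // SimObject itself, passing itself by const reference.
    Cache *
    CacheParams::create() const
    {
        return new Cache(*this);
    }

The tags/replacement_policy sanity checks move from the old create() into
the constructor body above.
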
@@ -97,14 +91,14 @@ Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
 
                 // if we have a dirty copy, make sure the recipient
                 // keeps it marked dirty (in the modified state)
-                if (blk->isDirty()) {
+                if (blk->isSet(CacheBlk::DirtyBit)) {
                     pkt->setCacheResponding();
-                    blk->status &= ~BlkDirty;
+                    blk->clearCoherenceBits(CacheBlk::DirtyBit);
                 }
-            } else if (blk->isWritable() && !pending_downgrade &&
-                       !pkt->hasSharers() &&
-                       pkt->cmd != MemCmd::ReadCleanReq) {
-                // we can give the requester a writable copy on a read
+            } else if (blk->isSet(CacheBlk::WritableBit) &&
+                !pending_downgrade && !pkt->hasSharers() &&
+                pkt->cmd != MemCmd::ReadCleanReq) {
+                // we can give the requestor a writable copy on a read
                 // request if:
                 // - we have a writable copy at this level (& below)
                 // - we don't have a pending snoop from below
@@ -114,7 +108,7 @@ Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                 //   snooping the packet)
                 // - the read has explicitly asked for a clean
                 //   copy of the line
-                if (blk->isDirty()) {
+                if (blk->isSet(CacheBlk::DirtyBit)) {
                     // special considerations if we're owner:
                     if (!deferred_response) {
                         // respond with the line in Modified state
@@ -136,7 +130,7 @@ Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk,
                         // the cache hierarchy through a cache,
                         // and first snoop upwards in all other
                         // branches
-                        blk->status &= ~BlkDirty;
+                        blk->clearCoherenceBits(CacheBlk::DirtyBit);
                     } else {
                         // if we're responding after our own miss,
                         // there's a window where the recipient didn't
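
A recurring mechanical change in these hunks (and throughout the patch)
replaces direct manipulation of the blk->status bitmask (BlkDirty,
BlkWritable, ...) with named coherence-bit accessors. A minimal sketch of
that interface, assuming a simple bitmask encoding (the enum values are
placeholders, not gem5's actual CacheBlk layout):

    class CacheBlkSketch
    {
      public:
        // Named coherence bits; the values are illustrative only.
        enum CoherenceBits : unsigned
        {
            WritableBit = 0x01,
            ReadableBit = 0x02,
            DirtyBit    = 0x04,
        };

        // true iff all of the given bits are set
        bool isSet(unsigned bits) const { return (coherence & bits) == bits; }
        void setCoherenceBits(unsigned bits) { coherence |= bits; }
        void clearCoherenceBits(unsigned bits) { coherence &= ~bits; }

      private:
        unsigned coherence = 0;
    };

With this interface, blk->status &= ~BlkDirty becomes
blk->clearCoherenceBits(CacheBlk::DirtyBit): the intent is explicit and the
encoding stays private to the block.
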
@@ -305,7 +299,8 @@ Cache::promoteWholeLineWrites(PacketPtr pkt)
 {
     // Cache line clearing instructions
     if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
-        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
+        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
+        !pkt->isMaskedWrite()) {
         pkt->cmd = MemCmd::WriteLineReq;
         DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
     }
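
The new !isMaskedWrite() check matters because a masked write does not
update every byte in the line, so it cannot be promoted to a whole-line
write. An illustrative predicate (the helper name is hypothetical):

    // A write can be promoted to WriteLineReq only if it covers the
    // whole block from offset 0 and writes every byte (no byte mask).
    bool canPromoteToWriteLine(size_t size, size_t offset,
                               bool maskedWrite, size_t blkSize)
    {
        return size == blkSize && offset == 0 && !maskedWrite;
    }
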
@@ -333,7 +328,7 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
         // should have flushed and have no valid block
         assert(!blk || !blk->isValid());
 
-        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
+        stats.cmdStats(pkt).mshrUncacheable[pkt->req->requestorId()]++;
 
         if (pkt->isWrite()) {
             allocateWriteBuffer(pkt, forward_time);
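
The stats change above is the theme of this commit: flat two-dimensional
arrays such as mshr_uncacheable[cmdIdx][masterId] give way to per-command
stat groups reached through cmdStats(pkt). A standalone sketch of the
pattern, using plain counters in place of gem5's Stats::Vector (only the
names visible in the diff are taken from the patch; the rest is assumed):

    #include <cstdint>
    #include <vector>

    using Counter = uint64_t;

    // One group of statistics per memory command type.
    struct CacheCmdStats
    {
        std::vector<Counter> mshrUncacheable; // indexed by requestor id
        std::vector<Counter> missLatency;     // indexed by requestor id
    };

    struct CacheStatsSketch
    {
        CacheStatsSketch(size_t numCmds, size_t numRequestors)
            : perCmd(numCmds,
                     CacheCmdStats{std::vector<Counter>(numRequestors, 0),
                                   std::vector<Counter>(numRequestors, 0)})
        {}

        // Select the stat group by command index; the caller then
        // indexes by requestor id, mirroring the updated line above:
        //   stats.cmdStats(...).mshrUncacheable[requestorId]++;
        CacheCmdStats &cmdStats(int cmdIndex) { return perCmd[cmdIndex]; }

        std::vector<CacheCmdStats> perCmd;
    };
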
@@ -378,12 +373,12 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
         if (!mshr) {
             // copy the request and create a new SoftPFReq packet
             RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
-                                                       pkt->req->getSize(),
-                                                       pkt->req->getFlags(),
-                                                       pkt->req->masterId());
+                                                    pkt->req->getSize(),
+                                                    pkt->req->getFlags(),
+                                                    pkt->req->requestorId());
             pf = new Packet(req, pkt->cmd);
             pf->allocate();
-            assert(pf->getAddr() == pkt->getAddr());
+            assert(pf->matchAddr(pkt));
             assert(pf->getSize() == pkt->getSize());
         }
 
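
The assert above now uses matchAddr() instead of comparing raw addresses:
two packets only refer to the same location when both the address and the
secure/non-secure address space agree. A hedged sketch of that comparison
(field names illustrative, not gem5's Packet internals):

    #include <cstdint>

    struct PacketSketch
    {
        uint64_t addr;
        bool secure;

        // Match only if address and address space both agree.
        bool matchAddr(const PacketSketch &other) const
        {
            return addr == other.addr && secure == other.secure;
        }
    };
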
@@ -391,7 +386,7 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
 
         // request_time is used here, taking into account lat and the delay
         // charged if the packet comes from the xbar.
-        cpuSidePort.schedTimingResp(pkt, request_time, true);
+        cpuSidePort.schedTimingResp(pkt, request_time);
 
         // If an outstanding request is in progress (we found an
         // MSHR) this is set to null
@@ -454,7 +449,7 @@ Cache::recvTimingReq(PacketPtr pkt)
         // this express snoop travels towards the memory, and at
         // every crossbar it is snooped upwards thus reaching
         // every cache in the system
-        bool M5_VAR_USED success = memSidePort.sendTimingReq(snoop_pkt);
+        M5_VAR_USED bool success = memSidePort.sendTimingReq(snoop_pkt);
         // express snoops always succeed
         assert(success);
 
@@ -503,7 +498,7 @@ Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
     const bool useUpgrades = true;
     assert(cpu_pkt->cmd != MemCmd::WriteLineReq || is_whole_line_write);
     if (is_whole_line_write) {
-        assert(!blkValid || !blk->isWritable());
+        assert(!blkValid || !blk->isSet(CacheBlk::WritableBit));
         // forward as invalidate to all other caches, this gives us
         // the line in Exclusive state, and invalidates all other
         // copies
@@ -512,7 +507,7 @@ Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
         // only reason to be here is that blk is read only and we need
         // it to be writable
         assert(needsWritable);
-        assert(!blk->isWritable());
+        assert(!blk->isSet(CacheBlk::WritableBit));
         cmd = cpu_pkt->isLLSC() ? MemCmd::SCUpgradeReq : MemCmd::UpgradeReq;
     } else if (cpu_pkt->cmd == MemCmd::SCUpgradeFailReq ||
                cpu_pkt->cmd == MemCmd::StoreCondFailReq) {
@@ -597,7 +592,7 @@ Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
             bus_pkt->print());
 
 #if TRACING_ON
-    CacheBlk::State old_state = blk ? blk->status : 0;
+    const std::string old_state = blk ? blk->print() : "";
 #endif
 
     Cycles latency = ticksToCycles(memSidePort.sendAtomic(bus_pkt));
@@ -605,7 +600,7 @@ Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *&blk,
     bool is_invalidate = bus_pkt->isInvalidate();
 
     // We are now dealing with the response handling
-    DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
+    DPRINTF(Cache, "%s: Receive response: %s for %s\n", __func__,
             bus_pkt->print(), old_state);
 
     // If packet was a forward, the response (if any) is already
@@ -658,6 +653,21 @@ Cache::recvAtomic(PacketPtr pkt)
 {
     promoteWholeLineWrites(pkt);
 
+    // follow the same flow as in recvTimingReq, and check if a cache
+    // above us is responding
+    if (pkt->cacheResponding()) {
+        assert(!pkt->req->isCacheInvalidate());
+        DPRINTF(Cache, "Cache above responding to %s: not responding\n",
+                pkt->print());
+
+        // if a cache is responding, and it had the line in Owned
+        // rather than Modified state, we need to invalidate any
+        // copies that are not on the same path to memory
+        assert(pkt->needsWritable() && !pkt->responderHadWritable());
+
+        return memSidePort.sendAtomic(pkt);
+    }
+
     return BaseCache::recvAtomic(pkt);
 }
 
@@ -670,10 +680,9 @@ Cache::recvAtomic(PacketPtr pkt)
 
 
 void
-Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
-                          PacketList &writebacks)
+Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
 {
-    MSHR::Target *initial_tgt = mshr->getTarget();
+    QueueEntry::Target *initial_tgt = mshr->getTarget();
     // First offset for critical word first calculations
     const int initial_offset = initial_tgt->pkt->getOffset(blkSize);
 
@@ -695,6 +704,32 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
 
             // Software prefetch handling for cache closest to core
             if (tgt_pkt->cmd.isSWPrefetch()) {
+                if (tgt_pkt->needsWritable()) {
+                    // All other copies of the block were invalidated and we
+                    // have an exclusive copy.
+
+                    // The coherence protocol assumes that if we fetched an
+                    // exclusive copy of the block, we have the intention to
+                    // modify it. Therefore the MSHR for the PrefetchExReq has
+                    // been the point of ordering and this cache has committed
+                    // to respond to snoops for the block.
+                    //
+                    // In most cases this is true anyway - a PrefetchExReq
+                    // will be followed by a WriteReq. However, if that
+                    // doesn't happen, the block is not marked as dirty and
+                    // the cache doesn't respond to snoops even though
+                    // it has committed to do so.
+                    //
+                    // To avoid deadlocks in cases where there is a snoop
+                    // between the PrefetchExReq and the expected WriteReq, we
+                    // proactively mark the block as Dirty.
+                    assert(blk);
+                    blk->setCoherenceBits(CacheBlk::DirtyBit);
+
+                    panic_if(isReadOnly, "Prefetch exclusive requests from "
+                            "read-only cache %s\n", name());
+                }
+
                 // a software prefetch would have already been ack'd
                 // immediately with dummy data so the core would be able to
                 // retire it. This request completes right here, so we
@@ -712,10 +747,17 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
             if (tgt_pkt->cmd == MemCmd::WriteLineReq) {
                 assert(!is_error);
                 assert(blk);
-                assert(blk->isWritable());
+                assert(blk->isSet(CacheBlk::WritableBit));
             }
 
-            if (blk && blk->isValid() && !mshr->isForward) {
+            // Here we decide whether we will satisfy the target using
+            // data from the block or from the response. We use the
+            // block data to satisfy the request when the block is
+            // present and valid and, in addition, the response is not
+            // forwarding data to the cache above (we did not fill
+            // either); otherwise we use the packet data.
+            if (blk && blk->isValid() &&
+                (!mshr->isForward || !pkt->hasData())) {
                 satisfyRequest(tgt_pkt, blk, true, mshr->hasPostDowngrade());
 
                 // How many bytes past the first request is this one
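
The widened condition above reads naturally as a small predicate; a sketch
of the decision (the helper name is hypothetical):

    // Use the block to satisfy an MSHR target only when the block is
    // valid and we are not merely forwarding a data-carrying response
    // from below (in which case the block was not filled).
    bool useBlockData(bool blkValid, bool mshrIsForward, bool respHasData)
    {
        return blkValid && (!mshrIsForward || !respHasData);
    }
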
@@ -734,8 +776,9 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
 
                 assert(!tgt_pkt->req->isUncacheable());
 
-                assert(tgt_pkt->req->masterId() < system->maxMasters());
-                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
+                assert(tgt_pkt->req->requestorId() < system->maxRequestors());
+                stats.cmdStats(tgt_pkt)
+                    .missLatency[tgt_pkt->req->requestorId()] +=
                     completion_time - target.recvTime;
             } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                 // failed StoreCond upgrade
@@ -749,15 +792,15 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
                     pkt->payloadDelay;
                 tgt_pkt->req->setExtraData(0);
             } else {
-                // We are about to send a response to a cache above
-                // that asked for an invalidation; we need to
-                // invalidate our copy immediately as the most
-                // up-to-date copy of the block will now be in the
-                // cache above. It will also prevent this cache from
-                // responding (if the block was previously dirty) to
-                // snoops as they should snoop the caches above where
-                // they will get the response from.
                 if (is_invalidate && blk && blk->isValid()) {
+                    // We are about to send a response to a cache above
+                    // that asked for an invalidation; we need to
+                    // invalidate our copy immediately as the most
+                    // up-to-date copy of the block will now be in the
+                    // cache above. It will also prevent this cache from
+                    // responding (if the block was previously dirty) to
+                    // snoops as they should snoop the caches above where
+                    // they will get the response from.
                     invalidateBlock(blk);
                 }
                 // not a cache fill, just forwarding response
@@ -765,13 +808,28 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
                 // from lower level caches/memory to the core.
                 completion_time += clockEdge(responseLatency) +
                     pkt->payloadDelay;
-                if (pkt->isRead() && !is_error) {
-                    // sanity check
-                    assert(pkt->getAddr() == tgt_pkt->getAddr());
-                    assert(pkt->getSize() >= tgt_pkt->getSize());
+                if (!is_error) {
+                    if (pkt->isRead()) {
+                        // sanity check
+                        assert(pkt->matchAddr(tgt_pkt));
+                        assert(pkt->getSize() >= tgt_pkt->getSize());
 
-                    tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
+                        tgt_pkt->setData(pkt->getConstPtr<uint8_t>());
+                    } else {
+                        // MSHR targets can read data either from the
+                        // block or the response pkt. If we can't get data
+                        // from the block (i.e., invalid or has old data)
+                        // or the response (did not bring in any data)
+                        // then make sure that the target didn't expect
+                        // any.
+                        assert(!tgt_pkt->hasRespData());
+                    }
                 }
+
+                // This response did not allocate here and was not
+                // consumed; make sure any flags are carried over to
+                // the cache above.
+                tgt_pkt->copyResponderFlags(pkt);
             }
             tgt_pkt->makeTimingResponse();
             // if this packet is an error copy that to the new packet
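
copyResponderFlags() preserves coherence information on a response that
merely passes through this cache. A minimal sketch, assuming packet flags
form a simple bitmask (the encoding below is a placeholder for gem5's
actual responder-flag set):

    // Placeholder flag encoding; gem5's Packet defines its own.
    enum : unsigned
    {
        CacheResponding      = 0x1,
        HasSharers           = 0x2,
        ResponderHadWritable = 0x4,
        ResponderFlags = CacheResponding | HasSharers | ResponderHadWritable,
    };

    // Carry responder-related flags from the response we received to
    // the response we forward upstream, leaving other flags untouched.
    void copyResponderFlagsSketch(unsigned &dst, unsigned src)
    {
        dst |= src & ResponderFlags;
    }
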
@@ -788,13 +846,13 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
             }
             // Reset the bus additional time as it is now accounted for
             tgt_pkt->headerDelay = tgt_pkt->payloadDelay = 0;
-            cpuSidePort.schedTimingResp(tgt_pkt, completion_time, true);
+            cpuSidePort.schedTimingResp(tgt_pkt, completion_time);
             break;
 
           case MSHR::Target::FromPrefetcher:
             assert(tgt_pkt->cmd == MemCmd::HardPFReq);
             if (blk)
-                blk->status |= BlkHWPrefetched;
+                blk->setPrefetched();
             delete tgt_pkt;
             break;
 
@@ -832,7 +890,7 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
         if (is_invalidate || mshr->hasPostInvalidate()) {
             invalidateBlock(blk);
         } else if (mshr->hasPostDowngrade()) {
-            blk->status &= ~BlkWritable;
+            blk->clearCoherenceBits(CacheBlk::WritableBit);
         }
     }
 }
@@ -840,7 +898,7 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
 PacketPtr
 Cache::evictBlock(CacheBlk *blk)
 {
-    PacketPtr pkt = (blk->isDirty() || writebackClean) ?
+    PacketPtr pkt = (blk->isSet(CacheBlk::DirtyBit) || writebackClean) ?
         writebackBlk(blk) : cleanEvictBlk(blk);
 
     invalidateBlock(blk);
@@ -852,16 +910,16 @@ PacketPtr
 Cache::cleanEvictBlk(CacheBlk *blk)
 {
     assert(!writebackClean);
-    assert(blk && blk->isValid() && !blk->isDirty());
+    assert(blk && blk->isValid() && !blk->isSet(CacheBlk::DirtyBit));
 
     // Creating a zero-sized write, a message to the snoop filter
     RequestPtr req = std::make_shared<Request>(
-        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+        regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
 
     if (blk->isSecure())
         req->setFlags(Request::SECURE);
 
-    req->taskId(blk->task_id);
+    req->taskId(blk->getTaskId());
 
     PacketPtr pkt = new Packet(req, MemCmd::CleanEvict);
     pkt->allocate();
@@ -918,7 +976,7 @@ Cache::doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
     pkt->headerDelay = pkt->payloadDelay = 0;
     DPRINTF(CacheVerbose, "%s: created response: %s tick: %lu\n", __func__,
             pkt->print(), forward_time);
-    memSidePort.schedTimingSnoopResp(pkt, forward_time, true);
+    memSidePort.schedTimingSnoopResp(pkt, forward_time);
 }
 
 uint32_t
@@ -936,7 +994,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
     // responds in atomic mode, so remember a few things about the
     // original packet up front
     bool invalidate = pkt->isInvalidate();
-    bool M5_VAR_USED needs_writable = pkt->needsWritable();
+    M5_VAR_USED bool needs_writable = pkt->needsWritable();
 
     // at the moment we could get an uncacheable write which does not
     // have the invalidate flag, and we need a suitable way of dealing
@@ -950,8 +1008,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
     if (forwardSnoops) {
         // first propagate snoop upward to see if anyone above us wants to
         // handle it.  save & restore packet src since it will get
-        // rewritten to be relative to cpu-side bus (if any)
-        bool alreadyResponded = pkt->cacheResponding();
+        // rewritten to be relative to CPU-side bus (if any)
         if (is_timing) {
             // copy the packet so that we can clear any flags before
             // forwarding it upwards, we also allocate data (passing
@@ -969,19 +1026,9 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
             // cache
             snoop_delay += snoopPkt.headerDelay;
 
-            if (snoopPkt.cacheResponding()) {
-                // cache-to-cache response from some upper cache
-                assert(!alreadyResponded);
-                pkt->setCacheResponding();
-            }
-            // upstream cache has the block, or has an outstanding
-            // MSHR, pass the flag on
-            if (snoopPkt.hasSharers()) {
-                pkt->setHasSharers();
-            }
             // If this request is a prefetch or clean evict and an upper level
             // signals block present, make sure to propagate the block
-            // presence to the requester.
+            // presence to the requestor.
             if (snoopPkt.isBlockCached()) {
                 pkt->setBlockCached();
             }
@@ -990,11 +1037,16 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
             if (snoopPkt.satisfied()) {
                 pkt->setSatisfied();
             }
+
+            // Copy over flags from the snoop response to make sure we
+            // inform the final destination
+            pkt->copyResponderFlags(&snoopPkt);
         } else {
+            bool already_responded = pkt->cacheResponding();
             cpuSidePort.sendAtomicSnoop(pkt);
-            if (!alreadyResponded && pkt->cacheResponding()) {
+            if (!already_responded && pkt->cacheResponding()) {
                 // cache-to-cache response from some upper cache:
-                // forward response to original requester
+                // forward response to original requestor
                 assert(pkt->isResponse());
             }
         }
@@ -1003,10 +1055,11 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
     bool respond = false;
     bool blk_valid = blk && blk->isValid();
     if (pkt->isClean()) {
-        if (blk_valid && blk->isDirty()) {
+        if (blk_valid && blk->isSet(CacheBlk::DirtyBit)) {
             DPRINTF(CacheVerbose, "%s: packet (snoop) %s found block: %s\n",
                     __func__, pkt->print(), blk->print());
-            PacketPtr wb_pkt = writecleanBlk(blk, pkt->req->getDest(), pkt->id);
+            PacketPtr wb_pkt =
+                writecleanBlk(blk, pkt->req->getDest(), pkt->id);
             PacketList writebacks;
             writebacks.push_back(wb_pkt);
 
@@ -1048,10 +1101,11 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
         // invalidation itself is taken care of below. We don't respond to
         // cache maintenance operations as this is done by the destination
         // xbar.
-        respond = blk->isDirty() && pkt->needsResponse();
+        respond = blk->isSet(CacheBlk::DirtyBit) && pkt->needsResponse();
 
-        chatty_assert(!(isReadOnly && blk->isDirty()), "Should never have "
-                      "a dirty block in a read-only cache %s\n", name());
+        chatty_assert(!(isReadOnly && blk->isSet(CacheBlk::DirtyBit)),
+            "Should never have a dirty block in a read-only cache %s\n",
+            name());
     }
 
     // Invalidate any prefetches from below that would strip write permissions
@@ -1075,8 +1129,9 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
         // which means we go from Modified to Owned (and will respond
         // below), remain in Owned (and will respond below), from
         // Exclusive to Shared, or remain in Shared
-        if (!pkt->req->isUncacheable())
-            blk->status &= ~BlkWritable;
+        if (!pkt->req->isUncacheable()) {
+            blk->clearCoherenceBits(CacheBlk::WritableBit);
+        }
         DPRINTF(Cache, "new state is %s\n", blk->print());
     }
 
@@ -1085,7 +1140,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
         // memory, and also prevent any memory from even seeing the
         // request
         pkt->setCacheResponding();
-        if (!pkt->isClean() && blk->isWritable()) {
+        if (!pkt->isClean() && blk->isSet(CacheBlk::WritableBit)) {
             // inform the cache hierarchy that this cache had the line
             // in the Modified state so that we avoid unnecessary
             // invalidations (see Packet::setResponderHadWritable)
@@ -1116,6 +1171,12 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
             if (pkt->hasData())
                 pkt->setDataFromBlock(blk->data, blkSize);
         }
+
+        // When a block is compressed, it must first be decompressed before
+        // being read, and this increases the snoop delay.
+        if (compressor && pkt->isRead()) {
+            snoop_delay += compressor->getDecompressionLatency(blk);
+        }
     }
 
     if (!respond && is_deferred) {
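
The compressor hook above charges the snoop path for decompression. A
sketch of the accounting, assuming a per-block decompression latency in
ticks (the helper is illustrative, not the BaseCacheCompressor API):

    #include <cstdint>

    using Tick = uint64_t;

    // Snoops that read a compressed block must pay for decompressing
    // it before the data can be supplied.
    Tick snoopDelayAfterDecompression(Tick snoopDelay, bool isRead,
                                      bool blkCompressed, Tick decompLat)
    {
        return snoopDelay + ((isRead && blkCompressed) ? decompLat : 0);
    }
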
@@ -1167,13 +1228,6 @@ Cache::recvTimingSnoopReq(PacketPtr pkt)
         return;
     }
 
-    // Bypass any existing cache maintenance requests if the request
-    // has been satisfied already (i.e., the dirty block has been
-    // found).
-    if (mshr && pkt->req->isCacheMaintenance() && pkt->satisfied()) {
-        return;
-    }
-
     // Let the MSHR itself track the snoop and decide whether we want
     // to go ahead and do the regular cache snoop
     if (mshr && mshr->handleSnoop(pkt, order++)) {
@@ -1342,7 +1396,7 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
         // prefetchSquash first may result in the MSHR being
         // prematurely deallocated.
         if (snoop_pkt.cacheResponding()) {
-            auto M5_VAR_USED r = outstandingSnoop.insert(snoop_pkt.req);
+            M5_VAR_USED auto r = outstandingSnoop.insert(snoop_pkt.req);
             assert(r.second);
 
             // if we are getting a snoop response with no sharers it
@@ -1377,12 +1431,3 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
 
     return BaseCache::sendMSHRQueuePacket(mshr);
 }
-
-Cache*
-CacheParams::create()
-{
-    assert(tags);
-    assert(replacement_policy);
-
-    return new Cache(this);
-}