mem-cache: Fix missing header in associative set
[gem5.git] / src / mem / cache / cache.cc
index a601a7be3957fe38e16d487cbe8ef0935d49fb27..e7dd5efc952ac772520b0ba9341a6f71b9980ca6 100644 (file)
@@ -305,7 +305,8 @@ Cache::promoteWholeLineWrites(PacketPtr pkt)
 {
     // Cache line clearing instructions
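+    // A masked write only updates the bytes enabled by its mask, so it
+    // cannot be treated as clearing the whole line and is not promoted.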
     if (doFastWrites && (pkt->cmd == MemCmd::WriteReq) &&
-        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0)) {
+        (pkt->getSize() == blkSize) && (pkt->getOffset(blkSize) == 0) &&
+        !pkt->isMaskedWrite()) {
         pkt->cmd = MemCmd::WriteLineReq;
         DPRINTF(Cache, "packet promoted from Write to WriteLineReq\n");
     }
@@ -333,7 +334,7 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
         // should have flushed and have no valid block
         assert(!blk || !blk->isValid());
 
-        mshr_uncacheable[pkt->cmdToIndex()][pkt->req->masterId()]++;
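+        // Record the uncacheable miss in the per-command MSHR stats for the
+        // requesting master.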
+        stats.cmdStats(pkt).mshr_uncacheable[pkt->req->masterId()]++;
 
         if (pkt->isWrite()) {
             allocateWriteBuffer(pkt, forward_time);
@@ -709,6 +710,32 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
 
             // Software prefetch handling for cache closest to core
             if (tgt_pkt->cmd.isSWPrefetch()) {
+                if (tgt_pkt->needsWritable()) {
+                    // All other copies of the block were invalidated and we
+                    // have an exclusive copy.
+
+                    // The coherence protocol assumes that if we fetched an
+                    // exclusive copy of the block, we have the intention to
+                    // modify it. Therefore the MSHR for the PrefetchExReq has
+                    // been the point of ordering and this cache has committed
+                    // to respond to snoops for the block.
+                    //
+                    // In most cases this is true anyway - a PrefetchExReq
+                    // will be followed by a WriteReq. However, if that
+                    // doesn't happen, the block is not marked as dirty and
+                    // the cache does not respond to snoops even though it
+                    // has committed to do so.
+                    //
+                    // To avoid deadlocks in cases where there is a snoop
+                    // between the PrefetchExReq and the expected WriteReq, we
+                    // proactively mark the block as Dirty.
+                    assert(blk);
+                    blk->status |= BlkDirty;
+
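+                    // Sanity check: a read-only cache should never have
+                    // issued an exclusive (for-write) prefetch.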
+                    panic_if(isReadOnly, "Prefetch exclusive requests from "
+                            "read-only cache %s\n", name());
+                }
+
                 // a software prefetch would have already been ack'd
                 // immediately with dummy data so the core would be able to
                 // retire it. This request completes right here, so we
@@ -749,7 +776,8 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk)
                 assert(!tgt_pkt->req->isUncacheable());
 
                 assert(tgt_pkt->req->masterId() < system->maxMasters());
-                missLatency[tgt_pkt->cmdToIndex()][tgt_pkt->req->masterId()] +=
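+                // Attribute the time from when this target was received
+                // until the response completes as miss latency for this
+                // master.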
+                stats.cmdStats(tgt_pkt)
+                    .missLatency[tgt_pkt->req->masterId()] +=
                     completion_time - target.recvTime;
             } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                 // failed StoreCond upgrade