Port: Make getAddrRanges const

diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 3312fc94156c35042e85ac4efe301a28c7390f3a..f7901261ff6f7e238ac3d649a21853b2d73d5f09 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -50,7 +50,6 @@
  * Cache definitions.
  */
 
-#include "base/fast_alloc.hh"
 #include "base/misc.hh"
 #include "base/range.hh"
 #include "base/types.hh"
@@ -276,7 +275,7 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
     if (pkt->req->isUncacheable()) {
         if (pkt->req->isClearLL()) {
             tags->clearLocks();
-        } else {
+        } else if (pkt->isWrite()) {
            blk = tags->findBlock(pkt->getAddr());
            if (blk != NULL) {
                tags->invalidateBlk(blk);
@@ -349,10 +348,10 @@ Cache<TagStore>::access(PacketPtr pkt, BlkType *&blk,
 }
 
 
-class ForwardResponseRecord : public Packet::SenderState, public FastAlloc
+class ForwardResponseRecord : public Packet::SenderState
 {
     Packet::SenderState *prevSenderState;
-    Packet::NodeID prevSrc;
+    PortID prevSrc;
 #ifndef NDEBUG
     BaseCache *cache;
 #endif
@@ -379,6 +378,13 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
 //@todo Add back in MemDebug Calls
 //    MemDebug::cacheAccess(pkt);
 
+
+    /// @todo temporary hack to deal with memory corruption issue until
+    /// 4-phase transactions are complete
+    for (int x = 0; x < pendingDelete.size(); x++)
+        delete pendingDelete[x];
+    pendingDelete.clear();
+
     // we charge hitLatency for doing just about anything here
     Tick time =  curTick() + hitLatency;
 
@@ -422,14 +428,18 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
         }
         // since we're the official target but we aren't responding,
         // delete the packet now.
-        delete pkt;
+
+        /// @todo nominally we should just delete the packet here,
+        /// however, until 4-phase stuff we can't because sending
+        /// cache is still relying on it
+        pendingDelete.push_back(pkt);
         return true;
     }
 
     if (pkt->req->isUncacheable()) {
         if (pkt->req->isClearLL()) {
             tags->clearLocks();
-        } else {
+        } else if (pkt->isWrite()) {
             BlkType *blk = tags->findBlock(pkt->getAddr());
             if (blk != NULL) {
                 tags->invalidateBlk(blk);
@@ -490,7 +500,10 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
             pkt->makeTimingResponse();
             cpuSidePort->respond(pkt, curTick()+lat);
         } else {
-            delete pkt;
+            /// @todo nominally we should just delete the packet here,
+            /// however, until 4-phase stuff we can't because sending
+            /// cache is still relying on it
+            pendingDelete.push_back(pkt);
         }
     } else {
         // miss
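
The pendingDelete additions above replace immediate delete pkt calls with a deferred-deletion scheme: packets the sending cache may still be touching are queued and only freed at the start of the next timingAccess (the loop added in the earlier hunk). A self-contained sketch of that pattern, with illustrative names (DeferredPacketDelete, defer, reclaim) and a stand-in Packet type rather than the real gem5 declarations:

    #include <vector>

    // Stand-in only, so the sketch compiles on its own; not gem5's Packet.
    struct Packet {};
    typedef Packet *PacketPtr;

    class DeferredPacketDelete
    {
        // packets we are done with but cannot free yet
        std::vector<PacketPtr> pendingDelete;

      public:
        // called where the cache would otherwise do "delete pkt": the
        // sender may still be using the packet, so only queue it
        void defer(PacketPtr pkt) { pendingDelete.push_back(pkt); }

        // called at the start of the next timing access, by which point
        // the earlier senders are done with their packets
        void reclaim()
        {
            for (PacketPtr p : pendingDelete)
                delete p;
            pendingDelete.clear();
        }
    };
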
@@ -782,7 +795,7 @@ Cache<TagStore>::functionalAccess(PacketPtr pkt, bool fromCpuSide)
         // continues towards the memory side
         if (fromCpuSide) {
             memSidePort->sendFunctional(pkt);
-        } else if (forwardSnoops && cpuSidePort->getMasterPort().isSnooping()) {
+        } else if (forwardSnoops && cpuSidePort->isSnooping()) {
             // if it came from the memory side, it must be a snoop request
             // and we should only forward it if we are forwarding snoops
             cpuSidePort->sendFunctionalSnoop(pkt);
@@ -890,7 +903,7 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
 
                 assert(!target->pkt->req->isUncacheable());
 
-                assert(pkt->req->masterId() < system->maxMasters());
+                assert(target->pkt->req->masterId() < system->maxMasters());
                 missLatency[target->pkt->cmdToIndex()][target->pkt->req->masterId()] +=
                     completion_time - target->recvTime;
             } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
@@ -1193,14 +1206,12 @@ Cache<TagStore>::handleSnoop(PacketPtr pkt, BlkType *blk,
                 pkt->assertShared();
             }
         } else {
-            Packet::NodeID origSrc = pkt->getSrc();
             cpuSidePort->sendAtomicSnoop(pkt);
             if (!alreadyResponded && pkt->memInhibitAsserted()) {
                 // cache-to-cache response from some upper cache:
                 // forward response to original requester
                 assert(pkt->isResponse());
             }
-            pkt->setSrc(origSrc);
         }
     }
 
@@ -1294,8 +1305,10 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
                 pkt->getAddr());
 
         //Look through writebacks for any non-uncachable writes, use that
-        for (int i = 0; i < writebacks.size(); i++) {
-            mshr = writebacks[i];
+        if (writebacks.size()) {
+            // We should only ever find a single match
+            assert(writebacks.size() == 1);
+            mshr = writebacks[0];
             assert(!mshr->isUncacheable());
             assert(mshr->getNumTargets() == 1);
             PacketPtr wb_pkt = mshr->getTarget()->pkt;
@@ -1321,16 +1334,14 @@ Cache<TagStore>::snoopTiming(PacketPtr pkt)
                 markInService(mshr);
                 delete wb_pkt;
             }
-
-            // If this was a shared writeback, there may still be
-            // other shared copies above that require invalidation.
-            // We could be more selective and return here if the
-            // request is non-exclusive or if the writeback is
-            // exclusive.
-            break;
-        }
+        } // writebacks.size()
     }
 
+    // If this was a shared writeback, there may still be
+    // other shared copies above that require invalidation.
+    // We could be more selective and return here if the
+    // request is non-exclusive or if the writeback is
+    // exclusive.
     handleSnoop(pkt, blk, true, false, false);
 }
 
@@ -1577,7 +1588,7 @@ Cache<TagStore>::unserialize(Checkpoint *cp, const std::string &section)
 
 template<class TagStore>
 AddrRangeList
-Cache<TagStore>::CpuSidePort::getAddrRanges()
+Cache<TagStore>::CpuSidePort::getAddrRanges() const
 {
     return cache->getAddrRanges();
 }
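
The last hunk is the change the subject line names: CpuSidePort::getAddrRanges picks up a const qualifier so it still overrides the port base class virtual once that is declared const. A minimal sketch of the C++ pattern, using illustrative stand-ins (SlavePortSketch, CpuSidePortSketch, a trivial AddrRange) rather than the real gem5 port hierarchy:

    #include <list>

    // Illustrative stand-ins; gem5's real types are richer.
    struct AddrRange { unsigned long long start, end; };
    typedef std::list<AddrRange> AddrRangeList;

    struct SlavePortSketch
    {
        // once the base declares the accessor const...
        virtual AddrRangeList getAddrRanges() const = 0;
        virtual ~SlavePortSketch() {}
    };

    struct CpuSidePortSketch : SlavePortSketch
    {
        AddrRangeList ranges;

        // ...each override must be const as well; without the qualifier
        // the derived function only hides the virtual and the class
        // remains abstract
        AddrRangeList getAddrRanges() const { return ranges; }
    };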