Fix a bug to handle the fact that a CPU can send Functional accesses while a sendTiming...
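
In rough terms, the fix routes every functional access through a new CachePort::checkFunctional() helper before the access is allowed to probe the cache, so that data sitting in packets queued behind a blocked sendTiming() (the drainList and the delayed-response transmitList) is seen first. A minimal sketch of that idea, condensed from the diff below; the early-return form is a simplification, not the verbatim committed code:

    // Sketch only: scan responses stalled behind a retry (drainList) and
    // responses whose send time has not arrived yet (transmitList).  If a
    // queued packet fully satisfies the functional access, the cache itself
    // no longer needs to be probed.
    bool
    BaseCache::CachePort::checkFunctional(PacketPtr pkt)
    {
        std::list<PacketPtr>::iterator i;
        for (i = drainList.begin(); i != drainList.end(); ++i) {
            if ((*i)->intersect(pkt) && !fixPacket(pkt, *i))
                return false;               // access completely satisfied
        }
        std::list<std::pair<Tick, PacketPtr> >::iterator j;
        for (j = transmitList.begin(); j != transmitList.end(); ++j) {
            if (j->second->intersect(pkt) && !fixDelayedResponsePacket(pkt, j->second))
                return false;               // access completely satisfied
        }
        return true;                        // still needs to probe the cache
    }

recvFunctional() then only calls cache->doFunctionalAccess() when this returns true, and checkAndSendFunctional() applies the same check before forwarding a functional access out of the cache.
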
diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
index 02a46b4445493d0e0a1caa4adc0871e5a97d7288..3af61375d45e1a166f9b052e4e7ce915d27730b3 100644
--- a/src/mem/cache/base_cache.cc
+++ b/src/mem/cache/base_cache.cc
  * Definition of BaseCache functions.
  */
 
-#include "mem/cache/base_cache.hh"
-#include "cpu/smt.hh"
 #include "cpu/base.hh"
+#include "cpu/smt.hh"
+#include "mem/cache/base_cache.hh"
+#include "mem/cache/miss/mshr.hh"
 
 using namespace std;
 
 BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
                                 bool _isCpuSide)
-    : Port(_name), cache(_cache), isCpuSide(_isCpuSide)
+    : Port(_name, _cache), cache(_cache), isCpuSide(_isCpuSide)
 {
     blocked = false;
+    waitingOnRetry = false;
     //Start ports at null if more than one is created we should panic
     //cpuSidePort = NULL;
     //memSidePort = NULL;
@@ -69,9 +71,23 @@ BaseCache::CachePort::deviceBlockSize()
 }
 
 bool
-BaseCache::CachePort::recvTiming(Packet *pkt)
+BaseCache::CachePort::recvTiming(PacketPtr pkt)
 {
-    if (blocked)
+    if (isCpuSide
+        && !pkt->req->isUncacheable()
+        && pkt->isInvalidate()
+        && !pkt->isRead() && !pkt->isWrite()) {
+        //Upgrade or Invalidate
+        //Look into what happens if two slave caches on bus
+        DPRINTF(Cache, "%s %x ?\n", pkt->cmdString(), pkt->getAddr());
+
+        assert(!(pkt->flags & SATISFIED));
+        pkt->flags |= SATISFIED;
+        //Invalidates/Upgrades need no response if they get the bus
+        return true;
+    }
+
+    if (pkt->isRequest() && blocked)
     {
         DPRINTF(Cache,"Scheduling a retry while blocked\n");
         mustSendRetry = true;
@@ -81,58 +97,150 @@ BaseCache::CachePort::recvTiming(Packet *pkt)
 }
 
 Tick
-BaseCache::CachePort::recvAtomic(Packet *pkt)
+BaseCache::CachePort::recvAtomic(PacketPtr pkt)
 {
     return cache->doAtomicAccess(pkt, isCpuSide);
 }
 
+bool
+BaseCache::CachePort::checkFunctional(PacketPtr pkt)
+{
+    //Check storage here first
+    list<PacketPtr>::iterator i = drainList.begin();
+    list<PacketPtr>::iterator iend = drainList.end();
+    bool notDone = true;
+    while (i != iend && notDone) {
+        PacketPtr target = *i;
+        // If the target contains data, and it overlaps the
+        // probed request, need to update data
+        if (target->intersect(pkt)) {
+            DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a drain\n",
+                    pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
+            notDone = fixPacket(pkt, target);
+        }
+        i++;
+    }
+    //Also check the response not yet ready to be on the list
+    std::list<std::pair<Tick,PacketPtr> >::iterator j = transmitList.begin();
+    std::list<std::pair<Tick,PacketPtr> >::iterator jend = transmitList.end();
+
+    while (j != jend && notDone) {
+        PacketPtr target = j->second;
+        // If the target contains data, and it overlaps the
+        // probed request, need to update data
+        if (target->intersect(pkt)) {
+            DPRINTF(Cache, "Functional %s access to blk_addr %x intersects a response\n",
+                    pkt->cmdString(), pkt->getAddr() & ~(cache->getBlockSize() - 1));
+            notDone = fixDelayedResponsePacket(pkt, target);
+        }
+        j++;
+    }
+    return notDone;
+}
+
 void
-BaseCache::CachePort::recvFunctional(Packet *pkt)
+BaseCache::CachePort::recvFunctional(PacketPtr pkt)
 {
-    cache->doFunctionalAccess(pkt, isCpuSide);
+    bool notDone = checkFunctional(pkt);
+    if (notDone)
+        cache->doFunctionalAccess(pkt, isCpuSide);
+}
+
+void
+BaseCache::CachePort::checkAndSendFunctional(PacketPtr pkt)
+{
+    bool notDone = checkFunctional(pkt);
+    if (notDone)
+        sendFunctional(pkt);
 }
 
 void
 BaseCache::CachePort::recvRetry()
 {
-    Packet *pkt;
+    PacketPtr pkt;
+    assert(waitingOnRetry);
     if (!drainList.empty()) {
+        DPRINTF(CachePort, "%s attempting to send a retry for response (%i waiting)\n"
+                , name(), drainList.size());
         //We have some responses to drain first
-        bool result = true;
-        while (result && !drainList.empty()) {
-            result = sendTiming(drainList.front());
-            if (result)
-                drainList.pop_front();
+        pkt = drainList.front();
+        drainList.pop_front();
+        if (sendTiming(pkt)) {
+            DPRINTF(CachePort, "%s sucessful in sending a retry for"
+                    "response (%i still waiting)\n", name(), drainList.size());
+            if (!drainList.empty() ||
+                (!isCpuSide && cache->doMasterRequest()) ||
+                (isCpuSide && cache->doSlaveRequest())) {
+
+                DPRINTF(CachePort, "%s has more responses/requests\n", name());
+                BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
+                reqCpu->schedule(curTick + 1);
+            }
+            waitingOnRetry = false;
+        }
+        else {
+            drainList.push_front(pkt);
         }
+        // Check if we're done draining once this list is empty
+        if (drainList.empty())
+            cache->checkDrain();
     }
     else if (!isCpuSide)
     {
+        DPRINTF(CachePort, "%s attempting to send a retry for MSHR\n", name());
+        if (!cache->doMasterRequest()) {
+            //This can happen if I am the owner of a block and see an upgrade
+            //while the block was in my WB Buffers.  I just remove the
+            //wb and de-assert the masterRequest
+            waitingOnRetry = false;
+            return;
+        }
         pkt = cache->getPacket();
+        MSHR* mshr = (MSHR*) pkt->senderState;
+        //Copy the packet, it may be modified/destroyed elsewhere
+        PacketPtr copyPkt = new Packet(*pkt);
+        copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
+        mshr->pkt = copyPkt;
+
         bool success = sendTiming(pkt);
         DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                 pkt->getAddr(), success ? "succesful" : "unsuccesful");
-        cache->sendResult(pkt, success);
+
+        waitingOnRetry = !success;
+        if (waitingOnRetry) {
+            DPRINTF(CachePort, "%s now waiting on a retry\n", name());
+        }
+
+        cache->sendResult(pkt, mshr, success);
+
         if (success && cache->doMasterRequest())
         {
+            DPRINTF(CachePort, "%s has more requests\n", name());
             //Still more to issue, rerequest in 1 cycle
-            pkt = NULL;
-            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
+            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
             reqCpu->schedule(curTick + 1);
         }
     }
     else
     {
+        assert(cache->doSlaveRequest());
+        //pkt = cache->getCoherencePacket();
+        //We save the packet, no reordering on CSHRS
         pkt = cache->getCoherencePacket();
+        MSHR* cshr = (MSHR*)pkt->senderState;
         bool success = sendTiming(pkt);
+        cache->sendCoherenceResult(pkt, cshr, success);
+        waitingOnRetry = !success;
         if (success && cache->doSlaveRequest())
         {
+            DPRINTF(CachePort, "%s has more requests\n", name());
             //Still more to issue, rerequest in 1 cycle
-            pkt = NULL;
-            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this);
+            BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false);
             reqCpu->schedule(curTick + 1);
         }
-
     }
+    if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name());
+    else DPRINTF(CachePort, "%s no longer waiting on retry\n", name());
     return;
 }
 void
@@ -159,34 +267,71 @@ BaseCache::CachePort::clearBlocked()
     }
 }
 
-BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort)
-    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort)
+BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, bool _newResponse)
+    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort),
+      newResponse(_newResponse)
 {
-    this->setFlags(AutoDelete);
+    if (!newResponse)
+        this->setFlags(AutoDelete);
     pkt = NULL;
 }
 
-BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, Packet *_pkt)
-    : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), pkt(_pkt)
-{
-    this->setFlags(AutoDelete);
-}
-
 void
 BaseCache::CacheEvent::process()
 {
-    if (!pkt)
+    if (!newResponse)
     {
-        if (!cachePort->isCpuSide)
-        {
-            //MSHR
+        if (cachePort->waitingOnRetry) return;
+        //We have some responses to drain first
+        if (!cachePort->drainList.empty()) {
+            DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name());
+            if (cachePort->sendTiming(cachePort->drainList.front())) {
+                DPRINTF(CachePort, "%s drains a response succesfully\n", cachePort->name());
+                cachePort->drainList.pop_front();
+                if (!cachePort->drainList.empty() ||
+                    (!cachePort->isCpuSide && cachePort->cache->doMasterRequest()) ||
+                    (cachePort->isCpuSide && cachePort->cache->doSlaveRequest())) {
+
+                    DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name());
+                    this->schedule(curTick + 1);
+                }
+            }
+            else {
+                cachePort->waitingOnRetry = true;
+                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
+            }
+        }
+        else if (!cachePort->isCpuSide)
+        {            //MSHR
+            DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name());
+            if (!cachePort->cache->doMasterRequest()) {
+                //This can happen if I am the owner of a block and see an upgrade
+                //while the block was in my WB Buffers.  I just remove the
+                //wb and de-assert the masterRequest
+                return;
+            }
+
             pkt = cachePort->cache->getPacket();
+            MSHR* mshr = (MSHR*) pkt->senderState;
+            //Copy the packet, it may be modified/destroyed elsewhere
+            PacketPtr copyPkt = new Packet(*pkt);
+            copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>());
+            mshr->pkt = copyPkt;
+
             bool success = cachePort->sendTiming(pkt);
             DPRINTF(Cache, "Address %x was %s in sending the timing request\n",
                     pkt->getAddr(), success ? "succesful" : "unsuccesful");
-            cachePort->cache->sendResult(pkt, success);
+
+            cachePort->waitingOnRetry = !success;
+            if (cachePort->waitingOnRetry) {
+                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
+            }
+
+            cachePort->cache->sendResult(pkt, mshr, success);
             if (success && cachePort->cache->doMasterRequest())
             {
+                DPRINTF(CachePort, "%s still more MSHR requests to send\n",
+                        cachePort->name());
                 //Still more to issue, rerequest in 1 cycle
                 pkt = NULL;
                 this->schedule(curTick+1);
@@ -195,10 +340,18 @@ BaseCache::CacheEvent::process()
         else
         {
             //CSHR
+            assert(cachePort->cache->doSlaveRequest());
             pkt = cachePort->cache->getCoherencePacket();
+            MSHR* cshr = (MSHR*) pkt->senderState;
             bool success = cachePort->sendTiming(pkt);
+            cachePort->cache->sendCoherenceResult(pkt, cshr, success);
+            cachePort->waitingOnRetry = !success;
+            if (cachePort->waitingOnRetry)
+                DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name());
             if (success && cachePort->cache->doSlaveRequest())
             {
+                DPRINTF(CachePort, "%s still more CSHR requests to send\n",
+                        cachePort->name());
                 //Still more to issue, rerequest in 1 cycle
                 pkt = NULL;
                 this->schedule(curTick+1);
@@ -206,18 +359,37 @@ BaseCache::CacheEvent::process()
         }
         return;
     }
-    //Response
-    //Know the packet to send
-    pkt->result = Packet::Success;
+    //Else it's a response
+    assert(cachePort->transmitList.size());
+    assert(cachePort->transmitList.front().first <= curTick);
+    pkt = cachePort->transmitList.front().second;
+    cachePort->transmitList.pop_front();
+    if (!cachePort->transmitList.empty()) {
+        Tick time = cachePort->transmitList.front().first;
+        schedule(time <= curTick ? curTick+1 : time);
+    }
+
+    if (pkt->flags & NACKED_LINE)
+        pkt->result = Packet::Nacked;
+    else
+        pkt->result = Packet::Success;
     pkt->makeTimingResponse();
-    if (!cachePort->drainList.empty()) {
-        //Already blocked waiting for bus, just append
+    DPRINTF(CachePort, "%s attempting to send a response\n", cachePort->name());
+    if (!cachePort->drainList.empty() || cachePort->waitingOnRetry) {
+        //Already have a list, just append
         cachePort->drainList.push_back(pkt);
+        DPRINTF(CachePort, "%s appending response onto drain list\n", cachePort->name());
     }
     else if (!cachePort->sendTiming(pkt)) {
         //It failed, save it to list of drain events
+        DPRINTF(CachePort, "%s now waiting for a retry\n", cachePort->name());
         cachePort->drainList.push_back(pkt);
+        cachePort->waitingOnRetry = true;
     }
+
+    // Check if we're done draining once this list is empty
+    if (cachePort->drainList.empty() && cachePort->transmitList.empty())
+        cachePort->cache->checkDrain();
 }
 
 const char *
@@ -231,20 +403,22 @@ BaseCache::getPort(const std::string &if_name, int idx)
 {
     if (if_name == "")
     {
-        if(cpuSidePort == NULL)
+        if(cpuSidePort == NULL) {
             cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+            sendEvent = new CacheEvent(cpuSidePort, true);
+        }
         return cpuSidePort;
     }
     else if (if_name == "functional")
     {
-        if(cpuSidePort == NULL)
-            cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
-        return cpuSidePort;
+        return new CachePort(name() + "-cpu_side_port", this, true);
     }
     else if (if_name == "cpu_side")
     {
-        if(cpuSidePort == NULL)
+        if(cpuSidePort == NULL) {
             cpuSidePort = new CachePort(name() + "-cpu_side_port", this, true);
+            sendEvent = new CacheEvent(cpuSidePort, true);
+        }
         return cpuSidePort;
     }
     else if (if_name == "mem_side")
@@ -252,6 +426,7 @@ BaseCache::getPort(const std::string &if_name, int idx)
         if (memSidePort != NULL)
             panic("Already have a mem side for this cache\n");
         memSidePort = new CachePort(name() + "-mem_side_port", this, false);
+        memSendEvent = new CacheEvent(memSidePort, true);
         return memSidePort;
     }
     else panic("Port name %s unrecognized\n", if_name);
@@ -481,3 +656,18 @@ BaseCache::regStats()
         ;
 
 }
+
+unsigned int
+BaseCache::drain(Event *de)
+{
+    // Set status
+    if (!canDrain()) {
+        drainEvent = de;
+
+        changeState(SimObject::Draining);
+        return 1;
+    }
+
+    changeState(SimObject::Drained);
+    return 0;
+}
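
For context on the drain handshake added above: both retry paths call cache->checkDrain() once a port's drainList (and transmitList) is empty. That helper is not defined in this file; the sketch below is a hypothetical reading of what it is assumed to do, based only on drain() above and on canDrain() being the predicate that reports whether every queue has emptied.

    // Hypothetical sketch, not part of this diff: once a drain has been
    // requested (drainEvent was set by drain() above) and all queued packets
    // have been flushed, notify the requester and mark the cache Drained.
    void
    BaseCache::checkDrain()
    {
        if (drainEvent && canDrain()) {
            drainEvent->process();      // wake whoever requested the drain
            drainEvent = NULL;
            changeState(SimObject::Drained);
        }
    }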