ruby: handle llsc accesses through CacheEntry, not CacheMemory
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index 1a8437aa156b58b799a87ae75baf451b1a57424a..ecaf6de0483dbce5b9ad4904d25bbdd2d3b4a119 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2012 ARM Limited
+ * Copyright (c) 2011-2013 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
@@ -44,7 +44,7 @@
 
 /**
  * @file
- * Implementation of a memory-mapped bus bridge that connects a master
+ * Implementation of a memory-mapped bridge that connects a master
  * and a slave through a request and response queue.
  */
 
@@ -108,25 +108,20 @@ Bridge::init()
 {
     // make sure both sides are connected and have the same block size
     if (!slavePort.isConnected() || !masterPort.isConnected())
-        fatal("Both ports of bus bridge are not connected to a bus.\n");
-
-    if (slavePort.peerBlockSize() != masterPort.peerBlockSize())
-        fatal("Slave port size %d, master port size %d \n " \
-              "Busses don't have the same block size... Not supported.\n",
-              slavePort.peerBlockSize(), masterPort.peerBlockSize());
+        fatal("Both ports of a bridge must be connected.\n");
 
     // notify the master side  of our address ranges
     slavePort.sendRangeChange();
 }
 
 bool
-Bridge::BridgeSlavePort::respQueueFull()
+Bridge::BridgeSlavePort::respQueueFull() const
 {
     return outstandingResponses == respQueueLimit;
 }
 
 bool
-Bridge::BridgeMasterPort::reqQueueFull()
+Bridge::BridgeMasterPort::reqQueueFull() const
 {
     return transmitList.size() == reqQueueLimit;
 }
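
[Aside, not part of the patch.] The two predicates above are plain occupancy checks against fixed limits, and const-qualifying them simply documents that they only observe state. A minimal standalone sketch of the same idea, using hypothetical names rather than the gem5 classes:

    #include <cassert>
    #include <cstddef>
    #include <deque>

    // Toy bounded transmit queue; full() is a const observer, in the same
    // style as respQueueFull()/reqQueueFull() above. Names are illustrative.
    class BoundedQueue
    {
      public:
        explicit BoundedQueue(std::size_t limit) : limit(limit) {}

        bool full() const { return entries.size() == limit; }

        void push(int pkt)
        {
            assert(!full());          // caller must check full() first
            entries.push_back(pkt);
        }

      private:
        const std::size_t limit;
        std::deque<int> entries;
    };

    int main()
    {
        BoundedQueue q(2);
        q.push(1);
        q.push(2);
        assert(q.full());             // a third push must wait for a retry
        return 0;
    }
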
@@ -142,7 +137,7 @@ Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)
     DPRINTF(Bridge, "Request queue size: %d\n", transmitList.size());
 
     // @todo: We need to pay for this and not just zero it out
-    pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;
+    pkt->headerDelay = pkt->payloadDelay = 0;
 
     slavePort.schedTimingResp(pkt, bridge.clockEdge(delay));
 
@@ -155,28 +150,38 @@ Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
     DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
             pkt->cmdString(), pkt->getAddr());
 
-    // ensure we do not have something waiting to retry
-    if(retryReq)
-        return false;
+    // we should not see a timing request if we are already in a retry
+    assert(!retryReq);
 
     DPRINTF(Bridge, "Response queue size: %d outresp: %d\n",
             transmitList.size(), outstandingResponses);
 
+    // if the request queue is full then there is no hope
     if (masterPort.reqQueueFull()) {
         DPRINTF(Bridge, "Request queue full\n");
         retryReq = true;
-    } else if (pkt->needsResponse()) {
-        if (respQueueFull()) {
-            DPRINTF(Bridge, "Response queue full\n");
-            retryReq = true;
-        } else {
-            DPRINTF(Bridge, "Reserving space for response\n");
-            assert(outstandingResponses != respQueueLimit);
-            ++outstandingResponses;
-            retryReq = false;
+    } else {
+        // look at the response queue if we expect to see a response
+        bool expects_response = pkt->needsResponse() &&
+            !pkt->memInhibitAsserted();
+        if (expects_response) {
+            if (respQueueFull()) {
+                DPRINTF(Bridge, "Response queue full\n");
+                retryReq = true;
+            } else {
+                // ok to send the request with space for the response
+                DPRINTF(Bridge, "Reserving space for response\n");
+                assert(outstandingResponses != respQueueLimit);
+                ++outstandingResponses;
+
+                // no need to set retryReq to false as this is already the
+                // case
+            }
+        }
 
+        if (!retryReq) {
             // @todo: We need to pay for this and not just zero it out
-            pkt->busFirstWordDelay = pkt->busLastWordDelay = 0;
+            pkt->headerDelay = pkt->payloadDelay = 0;
 
             masterPort.schedTimingReq(pkt, bridge.clockEdge(delay));
         }
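
[Aside, not part of the patch.] The reworked recvTimingReq boils down to a three-way decision: defer if the request queue is full; otherwise reserve a response slot only when a response will actually come back (the packet needs one and no other agent has claimed it via memInhibitAsserted); and forward if nothing forced a retry. A compact, self-contained restatement of that control flow, with plain bools standing in for packet and port state (a sketch, not the gem5 code):

    #include <cassert>
    #include <iostream>

    // Decide whether a bridge-like component can accept an incoming request.
    // Returns true if accepted; sets retry_pending when the sender must wait.
    // reserved_responses is bumped only when a response is expected, mirroring
    // the memInhibitAsserted() exclusion above. Illustrative only.
    bool acceptRequest(bool req_queue_full, bool resp_queue_full,
                       bool needs_response, bool inhibited,
                       int &reserved_responses, bool &retry_pending)
    {
        if (req_queue_full) {
            retry_pending = true;          // no room to even queue the request
            return false;
        }

        const bool expects_response = needs_response && !inhibited;
        if (expects_response) {
            if (resp_queue_full) {
                retry_pending = true;      // cannot guarantee space for the reply
                return false;
            }
            ++reserved_responses;          // reserve a slot before forwarding
        }

        return true;                       // safe to forward the request
    }

    int main()
    {
        int reserved = 0;
        bool retry = false;

        // Read-like request: accepted, and a response slot is reserved.
        assert(acceptRequest(false, false, true, false, reserved, retry));
        assert(reserved == 1 && !retry);

        // Response queue full: refused, sender must retry later.
        assert(!acceptRequest(false, true, true, false, reserved, retry));
        assert(retry);

        std::cout << "reserved=" << reserved << "\n";
        return 0;
    }
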
@@ -195,22 +200,13 @@ Bridge::BridgeSlavePort::retryStalledReq()
     if (retryReq) {
         DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
         retryReq = false;
-        sendRetry();
+        sendRetryReq();
     }
 }
 
 void
 Bridge::BridgeMasterPort::schedTimingReq(PacketPtr pkt, Tick when)
 {
-    // If we expect to see a response, we need to restore the source
-    // and destination field that is potentially changed by a second
-    // bus
-    if (!pkt->memInhibitAsserted() && pkt->needsResponse()) {
-        // Update the sender state so we can deal with the response
-        // appropriately
-        pkt->pushSenderState(new RequestState(pkt->getSrc()));
-    }
-
     // If we're about to put this packet at the head of the queue, we
     // need to schedule an event to do the transmit.  Otherwise there
     // should already be an event scheduled for sending the head
@@ -221,27 +217,13 @@ Bridge::BridgeMasterPort::schedTimingReq(PacketPtr pkt, Tick when)
 
     assert(transmitList.size() != reqQueueLimit);
 
-    transmitList.push_back(DeferredPacket(pkt, when));
+    transmitList.emplace_back(pkt, when);
 }
 
 
 void
 Bridge::BridgeSlavePort::schedTimingResp(PacketPtr pkt, Tick when)
 {
-    // This is a response for a request we forwarded earlier.  The
-    // corresponding request state should be stored in the packet's
-    // senderState field.
-    RequestState *req_state =
-        dynamic_cast<RequestState*>(pkt->popSenderState());
-    assert(req_state != NULL);
-    pkt->setDest(req_state->origSrc);
-    delete req_state;
-
-    // the bridge assumes that at least one bus has set the
-    // destination field of the packet
-    assert(pkt->isDestValid());
-    DPRINTF(Bridge, "response, new dest %d\n", pkt->getDest());
-
     // If we're about to put this packet at the head of the queue, we
     // need to schedule an event to do the transmit.  Otherwise there
     // should already be an event scheduled for sending the head
@@ -250,7 +232,7 @@ Bridge::BridgeSlavePort::schedTimingResp(PacketPtr pkt, Tick when)
         bridge.schedule(sendEvent, when);
     }
 
-    transmitList.push_back(DeferredPacket(pkt, when));
+    transmitList.emplace_back(pkt, when);
 }
 
 void
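
[Aside, not part of the patch.] The push_back(DeferredPacket(pkt, when)) to emplace_back(pkt, when) changes in both schedTiming* functions are behaviour-preserving: emplace_back forwards its arguments to the element's constructor and builds it in place, instead of constructing a temporary and copying it into the list. A minimal illustration with a stand-in struct (not the gem5 DeferredPacket):

    #include <cstdint>
    #include <iostream>
    #include <list>

    // Stand-in for a (packet, tick) pair held in a transmit list.
    struct QueuedItem
    {
        int pkt;            // placeholder for PacketPtr
        uint64_t tick;      // when the item becomes sendable
        QueuedItem(int p, uint64_t t) : pkt(p), tick(t) {}
    };

    int main()
    {
        std::list<QueuedItem> transmit_list;

        // Equivalent results; emplace_back constructs the element in place.
        transmit_list.push_back(QueuedItem(1, 100));
        transmit_list.emplace_back(2, 200);

        for (const auto &item : transmit_list)
            std::cout << item.pkt << " @ " << item.tick << "\n";
        return 0;
    }
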
@@ -274,10 +256,10 @@ Bridge::BridgeMasterPort::trySendTiming()
 
         // If there are more packets to send, schedule event to try again.
         if (!transmitList.empty()) {
-            req = transmitList.front();
+            DeferredPacket next_req = transmitList.front();
             DPRINTF(Bridge, "Scheduling next send\n");
-            bridge.schedule(sendEvent, std::max(req.tick,
-                                                bridge.nextCycle()));
+            bridge.schedule(sendEvent, std::max(next_req.tick,
+                                                bridge.clockEdge()));
         }
 
         // if we have stalled a request due to a full request queue,
@@ -315,10 +297,10 @@ Bridge::BridgeSlavePort::trySendTiming()
 
         // If there are more packets to send, schedule event to try again.
         if (!transmitList.empty()) {
-            resp = transmitList.front();
+            DeferredPacket next_resp = transmitList.front();
             DPRINTF(Bridge, "Scheduling next send\n");
-            bridge.schedule(sendEvent, std::max(resp.tick,
-                                                bridge.nextCycle()));
+            bridge.schedule(sendEvent, std::max(next_resp.tick,
+                                                bridge.clockEdge()));
         }
 
         // if there is space in the request queue and we were stalling
@@ -327,7 +309,7 @@ Bridge::BridgeSlavePort::trySendTiming()
         if (!masterPort.reqQueueFull() && retryReq) {
             DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
             retryReq = false;
-            sendRetry();
+            sendRetryReq();
         }
     }
 
@@ -336,23 +318,15 @@ Bridge::BridgeSlavePort::trySendTiming()
 }
 
 void
-Bridge::BridgeMasterPort::recvRetry()
+Bridge::BridgeMasterPort::recvReqRetry()
 {
-    Tick nextReady = transmitList.front().tick;
-    if (nextReady <= curTick())
-        trySendTiming();
-    else
-        bridge.schedule(sendEvent, nextReady);
+    trySendTiming();
 }
 
 void
-Bridge::BridgeSlavePort::recvRetry()
+Bridge::BridgeSlavePort::recvRespRetry()
 {
-    Tick nextReady = transmitList.front().tick;
-    if (nextReady <= curTick())
-        trySendTiming();
-    else
-        bridge.schedule(sendEvent, nextReady);
+    trySendTiming();
 }
 
 Tick
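
[Aside, not part of the patch.] With the retry callbacks reduced to a single trySendTiming() call, all ready-time reasoning lives in one place: after a successful send, trySendTiming schedules the next attempt at max(next.tick, clockEdge()), so the retry hook never needs to look at ticks itself. A small, greatly simplified sketch of that single-owner pattern (hypothetical names):

    #include <algorithm>
    #include <cstdint>
    #include <deque>
    #include <iostream>

    // Minimal sender that keeps all scheduling decisions in trySend(); the
    // retry hook simply re-invokes it, as in the patched recvReqRetry().
    struct DeferredSend { int pkt; uint64_t tick; };

    class Sender
    {
      public:
        void queue(int pkt, uint64_t tick) { list.push_back({pkt, tick}); }

        // Called both when the send event fires and when the peer retries us.
        void trySend(uint64_t now)
        {
            if (list.empty())
                return;
            DeferredSend head = list.front();
            list.pop_front();
            std::cout << "sent pkt " << head.pkt << " at " << now << "\n";
            if (!list.empty()) {
                // The only place where the next attempt is scheduled.
                uint64_t when = std::max(list.front().tick, now + 1);
                std::cout << "next send scheduled for " << when << "\n";
            }
        }

        void recvRetry(uint64_t now) { trySend(now); }  // no tick logic here

      private:
        std::deque<DeferredSend> list;
    };

    int main()
    {
        Sender s;
        s.queue(1, 10);
        s.queue(2, 50);
        s.trySend(10);     // sends pkt 1, schedules pkt 2
        s.recvRetry(60);   // retry callback just re-attempts the head
        return 0;
    }
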
@@ -364,12 +338,10 @@ Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt)
 void
 Bridge::BridgeSlavePort::recvFunctional(PacketPtr pkt)
 {
-    std::list<DeferredPacket>::iterator i;
-
     pkt->pushLabel(name());
 
     // check the response queue
-    for (i = transmitList.begin();  i != transmitList.end(); ++i) {
+    for (auto i = transmitList.begin();  i != transmitList.end(); ++i) {
         if (pkt->checkFunctional((*i).pkt)) {
             pkt->makeResponse();
             return;
@@ -391,7 +363,7 @@ bool
 Bridge::BridgeMasterPort::checkFunctional(PacketPtr pkt)
 {
     bool found = false;
-    std::list<DeferredPacket>::iterator i = transmitList.begin();
+    auto i = transmitList.begin();
 
     while(i != transmitList.end() && !found) {
         if (pkt->checkFunctional((*i).pkt)) {