/*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012,2015 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* Andreas Hansson
*/
+#include "base/trace.hh"
+#include "debug/Drain.hh"
#include "debug/PacketQueue.hh"
#include "mem/packet_queue.hh"
using namespace std;
-PacketQueue::PacketQueue(EventManager& _em, Port& _port,
- const std::string _label)
- : em(_em), label(_label), sendEvent(this), drainEvent(NULL), port(_port),
- waitingOnRetry(false)
+PacketQueue::PacketQueue(EventManager& _em, const std::string& _label,
+ bool disable_sanity_check)
+ : em(_em), sendEvent(this), _disableSanityCheck(disable_sanity_check),
+ label(_label), waitingOnRetry(false)
{
}
{
DPRINTF(PacketQueue, "Queue %s received retry\n", name());
assert(waitingOnRetry);
+ waitingOnRetry = false;
sendDeferredPacket();
}
+// check whether a packet to the given address is already queued on
+// the deferred-packet transmit list (simple linear scan)
+bool
+PacketQueue::hasAddr(Addr addr) const
+{
+    // caller is responsible for ensuring that all packets have the
+    // same alignment
+    for (const auto& p : transmitList) {
+        if (p.pkt->getAddr() == addr)
+            return true;
+    }
+    return false;
+}
+
bool
PacketQueue::checkFunctional(PacketPtr pkt)
{
pkt->pushLabel(label);
- DeferredPacketIterator i = transmitList.begin();
- DeferredPacketIterator end = transmitList.end();
+ auto i = transmitList.begin();
bool found = false;
- while (!found && i != end) {
+ while (!found && i != transmitList.end()) {
// If the buffered packet contains data, and it overlaps the
// current packet, then update data
found = pkt->checkFunctional(i->pkt);
}
void
-PacketQueue::schedSendEvent(Tick when)
+PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool force_order)
{
- // if we are waiting on a retry, do not schedule a send event, and
- // instead rely on retry being called
- if (waitingOnRetry) {
- assert(!sendEvent.scheduled());
- return;
- }
+ DPRINTF(PacketQueue, "%s for %s address %x size %d when %lu ord: %i\n",
+ __func__, pkt->cmdString(), pkt->getAddr(), pkt->getSize(), when,
+ force_order);
- if (!sendEvent.scheduled()) {
- em.schedule(&sendEvent, when);
- } else if (sendEvent.when() > when) {
- em.reschedule(&sendEvent, when);
- }
-}
+ // we can still send a packet before the end of this tick
+ assert(when >= curTick());
-void
-PacketQueue::schedSendTiming(PacketPtr pkt, Tick when, bool send_as_snoop)
-{
- assert(when > curTick());
+ // express snoops should never be queued
+ assert(!pkt->isExpressSnoop());
- // nothing on the list, or earlier than current front element,
- // schedule an event
- if (transmitList.empty() || when < transmitList.front().tick) {
- // note that currently we ignore a potentially outstanding retry
- // and could in theory put a new packet at the head of the
- // transmit list before retrying the existing packet
- transmitList.push_front(DeferredPacket(when, pkt, send_as_snoop));
- schedSendEvent(when);
- return;
+ // add a very basic sanity check on the port to ensure the
+ // invisible buffer is not growing beyond reasonable limits
+ if (!_disableSanityCheck && transmitList.size() > 100) {
+ panic("Packet queue %s has grown beyond 100 packets\n",
+ name());
}
- // list is non-empty and this belongs at the end
- if (when >= transmitList.back().tick) {
- transmitList.push_back(DeferredPacket(when, pkt, send_as_snoop));
+ // nothing on the list
+ if (transmitList.empty()) {
+ transmitList.emplace_front(when, pkt);
+ schedSendEvent(when);
return;
}
- // this belongs in the middle somewhere, insertion sort
- DeferredPacketIterator i = transmitList.begin();
- ++i; // already checked for insertion at front
- while (i != transmitList.end() && when >= i->tick)
- ++i;
- transmitList.insert(i, DeferredPacket(when, pkt, send_as_snoop));
-}
-
-void PacketQueue::trySendTiming()
-{
- assert(deferredPacketReady());
+ // we should either have an outstanding retry, or a send event
+ // scheduled, but there is an unfortunate corner case where the
+ // x86 page-table walker and timing CPU send out a new request as
+ // part of the receiving of a response (called by
+ // PacketQueue::sendDeferredPacket), in which we end up calling
+ // ourselves again before we had a chance to update waitingOnRetry
+ // assert(waitingOnRetry || sendEvent.scheduled());
- // take the next packet off the list here, as we might return to
- // ourselves through the sendTiming call below
- DeferredPacket dp = transmitList.front();
- transmitList.pop_front();
+ // this belongs in the middle somewhere, so search from the end to
+ // order by tick; however, if force_order is set, also make sure
+ // not to re-order in front of some existing packet with the same
+ // address
+ auto i = transmitList.end();
+ --i;
+ while (i != transmitList.begin() && when < i->tick &&
+ !(force_order && i->pkt->getAddr() == pkt->getAddr()))
+ --i;
- // attempt to send the packet and remember the outcome
- if (!dp.sendAsSnoop)
- waitingOnRetry = !port.sendTiming(dp.pkt);
- else
- waitingOnRetry = !port.sendTimingSnoop(dp.pkt);
+ // emplace inserts the element before the position pointed to by
+ // the iterator, so advance it one step
+ transmitList.emplace(++i, when, pkt);
+}
+void
+PacketQueue::schedSendEvent(Tick when)
+{
+ // if we are waiting on a retry just hold off
if (waitingOnRetry) {
- // put the packet back at the front of the list (packet should
- // not have changed since it wasn't accepted)
+ DPRINTF(PacketQueue, "Not scheduling send as waiting for retry\n");
assert(!sendEvent.scheduled());
- transmitList.push_front(dp);
+ return;
}
-}
-void
-PacketQueue::scheduleSend(Tick time)
-{
- // the next ready time is either determined by the next deferred packet,
- // or in the cache through the MSHR ready time
- Tick nextReady = std::min(deferredPacketReadyTime(), time);
+ if (when != MaxTick) {
+ // we cannot go back in time, and to be consistent we stick to
+ // one tick in the future
+ when = std::max(when, curTick() + 1);
+ // @todo Revisit the +1
- if (nextReady != MaxTick) {
- // if the sendTiming caused someone else to call our
- // recvTiming we could already have an event scheduled, check
- if (!sendEvent.scheduled())
- em.schedule(&sendEvent, std::max(nextReady, curTick() + 1));
+ if (!sendEvent.scheduled()) {
+ em.schedule(&sendEvent, when);
+ } else if (when < sendEvent.when()) {
+ // if the new time is earlier than when the event
+ // currently is scheduled, move it forward
+ em.reschedule(&sendEvent, when);
+ }
} else {
- // no more to send, so if we're draining, we may be done
- if (drainEvent && !sendEvent.scheduled()) {
- drainEvent->process();
- drainEvent = NULL;
+ // we get a MaxTick when there is no more to send, so if we're
+ // draining, we may be done at this point
+ if (drainState() == DrainState::Draining &&
+ transmitList.empty() && !sendEvent.scheduled()) {
+
+ DPRINTF(Drain, "PacketQueue done draining,"
+ "processing drain event\n");
+ signalDrainDone();
}
}
}
void
PacketQueue::sendDeferredPacket()
{
- // try to send what is on the list, this will set waitingOnRetry
- // accordingly
- trySendTiming();
+ // sanity checks
+ assert(!waitingOnRetry);
+ assert(deferredPacketReady());
+
+ DeferredPacket dp = transmitList.front();
+
+    // take the packet off the list before sending it, as sending of
+ // the packet in some cases causes a new packet to be enqueued
+    // (most notably when responding to the timing CPU, leading to a
+ // new request hitting in the L1 icache, leading to a new
+ // response)
+ transmitList.pop_front();
+
+ // use the appropriate implementation of sendTiming based on the
+ // type of queue
+ waitingOnRetry = !sendTiming(dp.pkt);
// if we succeeded and are not waiting for a retry, schedule the
// next send
if (!waitingOnRetry) {
- scheduleSend();
+ schedSendEvent(deferredPacketReadyTime());
+ } else {
+ // put the packet back at the front of the list
+ transmitList.emplace_front(dp);
}
}
sendDeferredPacket();
}
-unsigned int
-PacketQueue::drain(Event *de)
+// Drainable interface: the queue only counts as drained once the
+// transmit list is empty; otherwise report that draining is ongoing
+DrainState
+PacketQueue::drain()
+{
+    if (transmitList.empty()) {
+        return DrainState::Drained;
+    } else {
+        DPRINTF(Drain, "PacketQueue not drained\n");
+        return DrainState::Draining;
+    }
+}
+
+// construct a queue of outbound requests, sent via the given master port
+ReqPacketQueue::ReqPacketQueue(EventManager& _em, MasterPort& _masterPort,
+                               const std::string _label)
+    : PacketQueue(_em, _label), masterPort(_masterPort)
+{
+}
+
+// forward the packet to the master port as a timing request; the
+// return value indicates whether the port accepted it
+bool
+ReqPacketQueue::sendTiming(PacketPtr pkt)
+{
+    return masterPort.sendTimingReq(pkt);
+}
+
+// construct a queue of outbound snoop responses, sent via the given
+// master port
+SnoopRespPacketQueue::SnoopRespPacketQueue(EventManager& _em,
+                                           MasterPort& _masterPort,
+                                           const std::string _label)
+    : PacketQueue(_em, _label), masterPort(_masterPort)
+{
+}
+
+// forward the packet to the master port as a timing snoop response;
+// the return value indicates whether the port accepted it
+bool
+SnoopRespPacketQueue::sendTiming(PacketPtr pkt)
+{
+    return masterPort.sendTimingSnoopResp(pkt);
+}
+
+// construct a queue of outbound responses, sent via the given slave port
+RespPacketQueue::RespPacketQueue(EventManager& _em, SlavePort& _slavePort,
+                                 const std::string _label)
+    : PacketQueue(_em, _label), slavePort(_slavePort)
+{
+}
+
+// forward the packet to the slave port as a timing response; the
+// return value indicates whether the port accepted it
+bool
+RespPacketQueue::sendTiming(PacketPtr pkt)
{
-    if (transmitList.empty() && !sendEvent.scheduled())
-        return 0;
-    drainEvent = de;
-    return 1;
+    return slavePort.sendTimingResp(pkt);
}