/*
 * Copyright (c) 2010-2013 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
#include "base/random.hh"
#include "debug/Drain.hh"
#include "mem/simple_mem.hh"

using namespace std;
// Construct a SimpleMemory from its Python-generated parameter struct.
// All latency/bandwidth knobs come from the config; the response queue
// starts idle (not busy, no pending request or response retries).
SimpleMemory::SimpleMemory(const SimpleMemoryParams* p) :
    AbstractMemory(p),
    port(name() + ".port", *this), latency(p->latency),
    latency_var(p->latency_var), bandwidth(p->bandwidth), isBusy(false),
    retryReq(false), retryResp(false),
    releaseEvent(this), dequeueEvent(this)
{
}
void
SimpleMemory::init()
{
+ AbstractMemory::init();
+
// allow unconnected memories as this is used in several ruby
// systems at the moment
if (port.isConnected()) {
}
Tick
SimpleMemory::recvAtomic(PacketPtr pkt)
{
    // Perform the actual read/write against the backing store.
    access(pkt);

    // If another device has inhibited the response, this memory does
    // not respond and thus contributes no latency.
    return pkt->memInhibitAsserted() ? 0 : getLatency();
}
void
-SimpleMemory::doFunctionalAccess(PacketPtr pkt)
+SimpleMemory::recvFunctional(PacketPtr pkt)
{
+ pkt->pushLabel(name());
+
functionalAccess(pkt);
+
+ bool done = false;
+ auto p = packetQueue.begin();
+ // potentially update the packets in our packet queue as well
+ while (!done && p != packetQueue.end()) {
+ done = pkt->checkFunctional(p->pkt);
+ ++p;
+ }
+
+ pkt->popLabel();
}
bool
return false;
}
+ // @todo someone should pay for this
+ pkt->headerDelay = pkt->payloadDelay = 0;
+
// update the release time according to the bandwidth limit, and
// do so with respect to the time it takes to finish this request
// rather than long term as it is the short term data rate that is
// go ahead and deal with the packet and put the response in the
// queue if there is one
bool needsResponse = pkt->needsResponse();
- Tick latency = doAtomicAccess(pkt);
+ recvAtomic(pkt);
// turn packet around to go back to requester if response expected
if (needsResponse) {
- // doAtomicAccess() should already have turned packet into
+ // recvAtomic() should already have turned packet into
// atomic response
assert(pkt->isResponse());
- port.schedTimingResp(pkt, curTick() + latency);
+ // to keep things simple (and in order), we put the packet at
+ // the end even if the latency suggests it should be sent
+ // before the packet(s) before it
+ packetQueue.emplace_back(pkt, curTick() + getLatency());
+ if (!retryResp && !dequeueEvent.scheduled())
+ schedule(dequeueEvent, packetQueue.back().tick);
} else {
- delete pkt;
+ pendingDelete.push_back(pkt);
}
return true;
isBusy = false;
if (retryReq) {
retryReq = false;
- port.sendRetry();
+ port.sendRetryReq();
+ }
+}
+
+void
+SimpleMemory::dequeue()
+{
+ assert(!packetQueue.empty());
+ DeferredPacket deferred_pkt = packetQueue.front();
+
+ retryResp = !port.sendTimingResp(deferred_pkt.pkt);
+
+ if (!retryResp) {
+ packetQueue.pop_front();
+
+ // if the queue is not empty, schedule the next dequeue event,
+ // otherwise signal that we are drained if we were asked to do so
+ if (!packetQueue.empty()) {
+ // if there were packets that got in-between then we
+ // already have an event scheduled, so use re-schedule
+ reschedule(dequeueEvent,
+ std::max(packetQueue.front().tick, curTick()), true);
+ } else if (drainState() == DrainState::Draining) {
+ DPRINTF(Drain, "Draining of SimpleMemory complete\n");
+ signalDrainDone();
+ }
}
}
+Tick
+SimpleMemory::getLatency() const
+{
+ return latency +
+ (latency_var ? random_mt.random<Tick>(0, latency_var) : 0);
+}
+
+void
+SimpleMemory::recvRespRetry()
+{
+ assert(retryResp);
+
+ dequeue();
+}
+
BaseSlavePort &
SimpleMemory::getSlavePort(const std::string &if_name, PortID idx)
{
}
}
-unsigned int
-SimpleMemory::drain(DrainManager *dm)
+DrainState
+SimpleMemory::drain()
{
- int count = port.drain(dm);
-
- if (count)
- setDrainState(Drainable::Draining);
- else
- setDrainState(Drainable::Drained);
- return count;
+ if (!packetQueue.empty()) {
+ DPRINTF(Drain, "SimpleMemory Queue has requests, waiting to drain\n");
+ return DrainState::Draining;
+ } else {
+ return DrainState::Drained;
+ }
}
// Plain (non-queued) slave port; the owning SimpleMemory manages its
// own response queue.
SimpleMemory::MemoryPort::MemoryPort(const std::string& _name,
                                     SimpleMemory& _memory)
    : SlavePort(_name, &_memory), memory(_memory)
{ }
AddrRangeList
// Forward atomic accesses to the owning memory.
Tick
SimpleMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
    return memory.recvAtomic(pkt);
}
// Forward functional accesses to the owning memory, which also snoops
// its own response queue.
void
SimpleMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
    memory.recvFunctional(pkt);
}
bool
return memory.recvTimingReq(pkt);
}
+void
+SimpleMemory::MemoryPort::recvRespRetry()
+{
+ memory.recvRespRetry();
+}
+
SimpleMemory*
SimpleMemoryParams::create()
{