/*
- * Copyright (c) 2010-2012 ARM Limited
+ * Copyright (c) 2010-2013 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
SimpleMemory::SimpleMemory(const SimpleMemoryParams* p) :
AbstractMemory(p),
- port(name() + ".port", *this), lat(p->latency),
- lat_var(p->latency_var), bandwidth(p->bandwidth),
- isBusy(false), retryReq(false), releaseEvent(this)
+ port(name() + ".port", *this), latency(p->latency),
+ latency_var(p->latency_var), bandwidth(p->bandwidth), isBusy(false),
+ retryReq(false), retryResp(false),
+ releaseEvent(this), dequeueEvent(this), drainManager(NULL)
{
}
}
Tick
-SimpleMemory::calculateLatency(PacketPtr pkt)
-{
- if (pkt->memInhibitAsserted()) {
- return 0;
- } else {
- Tick latency = lat;
- if (lat_var != 0)
- latency += random_mt.random<Tick>(0, lat_var);
- return latency;
- }
-}
-
-Tick
-SimpleMemory::doAtomicAccess(PacketPtr pkt)
+SimpleMemory::recvAtomic(PacketPtr pkt)
{
access(pkt);
- return calculateLatency(pkt);
+ return pkt->memInhibitAsserted() ? 0 : getLatency();
}
void
-SimpleMemory::doFunctionalAccess(PacketPtr pkt)
+SimpleMemory::recvFunctional(PacketPtr pkt)
{
+ pkt->pushLabel(name());
+
functionalAccess(pkt);
+
+ // potentially update the packets in our packet queue as well
+ for (auto i = packetQueue.begin(); i != packetQueue.end(); ++i)
+ pkt->checkFunctional(i->pkt);
+
+ pkt->popLabel();
}
bool
// go ahead and deal with the packet and put the response in the
// queue if there is one
bool needsResponse = pkt->needsResponse();
- Tick latency = doAtomicAccess(pkt);
+ recvAtomic(pkt);
// turn packet around to go back to requester if response expected
if (needsResponse) {
- // doAtomicAccess() should already have turned packet into
+ // recvAtomic() should already have turned packet into
// atomic response
assert(pkt->isResponse());
- port.schedTimingResp(pkt, curTick() + latency);
+ // to keep things simple (and in order), we put the packet at
+ // the end even if the latency suggests it should be sent
+ // before the packet(s) already queued ahead of it
+ packetQueue.push_back(DeferredPacket(pkt, curTick() + getLatency()));
+ if (!dequeueEvent.scheduled())
+ schedule(dequeueEvent, packetQueue.back().tick);
} else {
pendingDelete.push_back(pkt);
}
}
}
+void
+SimpleMemory::dequeue()
+{
+ assert(!packetQueue.empty());
+ DeferredPacket deferred_pkt = packetQueue.front();
+
+ retryResp = !port.sendTimingResp(deferred_pkt.pkt);
+
+ if (!retryResp) {
+ packetQueue.pop_front();
+
+ // if the queue is not empty, schedule the next dequeue event,
+ // otherwise signal that we are drained if we were asked to do so
+ if (!packetQueue.empty()) {
+ // if there were packets that got in-between then we
+ // already have an event scheduled, so use re-schedule
+ reschedule(dequeueEvent,
+ std::max(packetQueue.front().tick, curTick()), true);
+ } else if (drainManager) {
+ drainManager->signalDrainDone();
+ drainManager = NULL;
+ }
+ }
+}
+
+Tick
+SimpleMemory::getLatency() const
+{
+ return latency +
+ (latency_var ? random_mt.random<Tick>(0, latency_var) : 0);
+}
+
+void
+SimpleMemory::recvRetry()
+{
+ assert(retryResp);
+
+ dequeue();
+}
+
BaseSlavePort &
SimpleMemory::getSlavePort(const std::string &if_name, PortID idx)
{
unsigned int
SimpleMemory::drain(DrainManager *dm)
{
- int count = port.drain(dm);
+ int count = 0;
+
+ // also track our internal queue
+ if (!packetQueue.empty()) {
+ count += 1;
+ drainManager = dm;
+ }
if (count)
setDrainState(Drainable::Draining);
SimpleMemory::MemoryPort::MemoryPort(const std::string& _name,
SimpleMemory& _memory)
- : QueuedSlavePort(_name, &_memory, queueImpl),
- queueImpl(_memory, *this), memory(_memory)
+ : SlavePort(_name, &_memory), memory(_memory)
{ }
AddrRangeList
Tick
SimpleMemory::MemoryPort::recvAtomic(PacketPtr pkt)
{
- return memory.doAtomicAccess(pkt);
+ return memory.recvAtomic(pkt);
}
void
SimpleMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
- pkt->pushLabel(memory.name());
-
- if (!queue.checkFunctional(pkt)) {
- // Default implementation of SimpleTimingPort::recvFunctional()
- // calls recvAtomic() and throws away the latency; we can save a
- // little here by just not calculating the latency.
- memory.doFunctionalAccess(pkt);
- }
-
- pkt->popLabel();
+ memory.recvFunctional(pkt);
}
bool
return memory.recvTimingReq(pkt);
}
+void
+SimpleMemory::MemoryPort::recvRetry()
+{
+ memory.recvRetry();
+}
+
SimpleMemory*
SimpleMemoryParams::create()
{
/*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012-2013 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
#ifndef __SIMPLE_MEMORY_HH__
#define __SIMPLE_MEMORY_HH__
+#include <deque>
+
#include "mem/abstract_mem.hh"
-#include "mem/tport.hh"
+#include "mem/port.hh"
#include "params/SimpleMemory.hh"
/**
* The simple memory is a basic single-ported memory controller with
- * an configurable throughput and latency, potentially with a variance
- * added to the latter. It uses a QueueSlavePort to avoid dealing with
- * the flow control of sending responses.
+ * a configurable throughput and latency.
+ *
* @sa \ref gem5MemorySystem "gem5 Memory System"
*/
class SimpleMemory : public AbstractMemory
private:
- class MemoryPort : public QueuedSlavePort
+ /**
+ * A deferred packet stores a packet along with its scheduled
+ * transmission time
+ */
+ class DeferredPacket
+ {
+
+ public:
+
+ const Tick tick;
+ const PacketPtr pkt;
+
+ DeferredPacket(PacketPtr _pkt, Tick _tick) : tick(_tick), pkt(_pkt)
+ { }
+ };
+
+ class MemoryPort : public SlavePort
{
private:
- /// Queue holding the response packets
- SlavePacketQueue queueImpl;
SimpleMemory& memory;
public:
bool recvTimingReq(PacketPtr pkt);
+ void recvRetry();
+
AddrRangeList getAddrRanges() const;
};
MemoryPort port;
- Tick lat;
- Tick lat_var;
+ /**
+ * Latency from when a request is accepted until the response is
+ * ready to be sent.
+ */
+ const Tick latency;
+
+ /**
+ * Random fudge factor, drawn uniformly in [0, latency_var], added to the latency.
+ */
+ const Tick latency_var;
+
+ /**
+ * Internal (unbounded) storage to mimic the delay caused by the
+ * actual memory access. Note that this is where the packet spends
+ * the memory latency.
+ */
+ std::deque<DeferredPacket> packetQueue;
- /// Bandwidth in ticks per byte
+ /**
+ * Bandwidth in ticks per byte. The regulation affects the
+ * acceptance rate of requests and the queueing takes place after
+ * the regulation.
+ */
const double bandwidth;
/**
*/
bool retryReq;
+ /**
+ * Remember if we failed to send a response and are awaiting a
+ * retry. This is only used as a check.
+ */
+ bool retryResp;
+
/**
* Release the memory after being busy and send a retry if a
* request was rejected in the meanwhile.
EventWrapper<SimpleMemory, &SimpleMemory::release> releaseEvent;
+ /**
+ * Dequeue a packet from our internal packet queue and move it to
+ * the port where it will be sent as soon as possible.
+ */
+ void dequeue();
+
+ EventWrapper<SimpleMemory, &SimpleMemory::dequeue> dequeueEvent;
+
+ /**
+ * Determine the latency.
+ *
+ * @return the latency seen by the current packet
+ */
+ Tick getLatency() const;
+
/** @todo this is a temporary workaround until the 4-phase code is
* committed. upstream caches needs this packet until true is returned, so
* hold onto it for deletion until a subsequent call
*/
std::vector<PacketPtr> pendingDelete;
+ /**
+ * If we need to drain, keep the drain manager around until we're
+ * done here.
+ */
+ DrainManager *drainManager;
+
public:
SimpleMemory(const SimpleMemoryParams *p);
- virtual ~SimpleMemory() { }
unsigned int drain(DrainManager *dm);
- virtual BaseSlavePort& getSlavePort(const std::string& if_name,
- PortID idx = InvalidPortID);
- virtual void init();
+ BaseSlavePort& getSlavePort(const std::string& if_name,
+ PortID idx = InvalidPortID);
+ void init();
protected:
- Tick doAtomicAccess(PacketPtr pkt);
- void doFunctionalAccess(PacketPtr pkt);
+ Tick recvAtomic(PacketPtr pkt);
+
+ void recvFunctional(PacketPtr pkt);
+
bool recvTimingReq(PacketPtr pkt);
- Tick calculateLatency(PacketPtr pkt);
+
+ void recvRetry();
};