X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fmem%2Fsimple_mem.cc;h=bb44b8c850ec0172518a956e213957e052edf156;hb=289a8ebdb1348994164c292f24970e56817cd1d6;hp=aa9168bf711313be514ff32e916c0b3700b7452d;hpb=46d9adb68c96b94ae25bbe92d34e375daf532ece;p=gem5.git

diff --git a/src/mem/simple_mem.cc b/src/mem/simple_mem.cc
index aa9168bf7..bb44b8c85 100644
--- a/src/mem/simple_mem.cc
+++ b/src/mem/simple_mem.cc
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010-2012 ARM Limited
+ * Copyright (c) 2010-2013, 2015 ARM Limited
  * All rights reserved
  *
  * The license below extends only to copyright in the software and shall
@@ -44,91 +44,216 @@
 #include "base/random.hh"
 #include "mem/simple_mem.hh"
+#include "debug/Drain.hh"

 using namespace std;

-SimpleMemory::SimpleMemory(const Params* p) :
+SimpleMemory::SimpleMemory(const SimpleMemoryParams* p) :
     AbstractMemory(p),
-    lat(p->latency), lat_var(p->latency_var)
+    port(name() + ".port", *this), latency(p->latency),
+    latency_var(p->latency_var), bandwidth(p->bandwidth), isBusy(false),
+    retryReq(false), retryResp(false),
+    releaseEvent(this), dequeueEvent(this)
 {
-    for (size_t i = 0; i < p->port_port_connection_count; ++i) {
-        ports.push_back(new MemoryPort(csprintf("%s-port-%d", name(), i),
-                                       *this));
-    }
 }

 void
 SimpleMemory::init()
 {
-    for (vector<MemoryPort*>::iterator p = ports.begin(); p != ports.end();
-         ++p) {
-        if (!(*p)->isConnected()) {
-            fatal("SimpleMemory port %s is unconnected!\n", (*p)->name());
-        } else {
-            (*p)->sendRangeChange();
-        }
+    AbstractMemory::init();
+
+    // allow unconnected memories as this is used in several ruby
+    // systems at the moment
+    if (port.isConnected()) {
+        port.sendRangeChange();
     }
 }

 Tick
-SimpleMemory::calculateLatency(PacketPtr pkt)
+SimpleMemory::recvAtomic(PacketPtr pkt)
+{
+    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
+             "is responding");
+
+    access(pkt);
+    return getLatency();
+}
+
+void
+SimpleMemory::recvFunctional(PacketPtr pkt)
 {
-    if (pkt->memInhibitAsserted()) {
-        return 0;
+    pkt->pushLabel(name());
+
+    functionalAccess(pkt);
+
+    bool done = false;
+    auto p = packetQueue.begin();
+    // potentially update the packets in our packet queue as well
+    while (!done && p != packetQueue.end()) {
+        done = pkt->checkFunctional(p->pkt);
+        ++p;
+    }
+
+    pkt->popLabel();
+}
+
+bool
+SimpleMemory::recvTimingReq(PacketPtr pkt)
+{
+    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
+             "is responding");
+
+    panic_if(!(pkt->isRead() || pkt->isWrite()),
+             "Should only see read and writes at memory controller, "
+             "saw %s to %#llx\n", pkt->cmdString(), pkt->getAddr());
+
+    // we should not get a new request after committing to retry the
+    // current one, but unfortunately the CPU violates this rule, so
+    // simply ignore it for now
+    if (retryReq)
+        return false;
+
+    // if we are busy with a read or write, remember that we have to
+    // retry
+    if (isBusy) {
+        retryReq = true;
+        return false;
+    }
+
+    // technically the packet only reaches us after the header delay,
+    // and since this is a memory controller we also need to
+    // deserialise the payload before performing any write operation
+    Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
+    pkt->headerDelay = pkt->payloadDelay = 0;
+
+    // update the release time according to the bandwidth limit, and
+    // do so with respect to the time it takes to finish this request
+    // rather than long term as it is the short term data rate that is
+    // limited for any real memory
+
+    // calculate an appropriate tick to release to not exceed
+    // the bandwidth limit
+    Tick duration = pkt->getSize() * bandwidth;
+
+    // only consider ourselves busy if there is any need to wait
+    // to avoid extra events being scheduled for (infinitely) fast
+    // memories
+    if (duration != 0) {
+        schedule(releaseEvent, curTick() + duration);
+        isBusy = true;
+    }
+
+    // go ahead and deal with the packet and put the response in the
+    // queue if there is one
+    bool needsResponse = pkt->needsResponse();
+    recvAtomic(pkt);
+    // turn packet around to go back to requester if response expected
+    if (needsResponse) {
+        // recvAtomic() should already have turned packet into
+        // atomic response
+        assert(pkt->isResponse());
+
+        Tick when_to_send = curTick() + receive_delay + getLatency();
+
+        // typically this should be added at the end, so start the
+        // insertion sort with the last element, also make sure not to
+        // re-order in front of some existing packet with the same
+        // address, the latter is important as this memory effectively
+        // hands out exclusive copies (shared is not asserted)
+        auto i = packetQueue.end();
+        --i;
+        while (i != packetQueue.begin() && when_to_send < i->tick &&
+               i->pkt->getAddr() != pkt->getAddr())
+            --i;
+
+        // emplace inserts the element before the position pointed to by
+        // the iterator, so advance it one step
+        packetQueue.emplace(++i, pkt, when_to_send);
+
+        if (!retryResp && !dequeueEvent.scheduled())
+            schedule(dequeueEvent, packetQueue.back().tick);
     } else {
-        Tick latency = lat;
-        if (lat_var != 0)
-            latency += random_mt.random<Tick>(0, lat_var);
-        return latency;
+        pendingDelete.reset(pkt);
+    }
+
+    return true;
+}
+
+void
+SimpleMemory::release()
+{
+    assert(isBusy);
+    isBusy = false;
+    if (retryReq) {
+        retryReq = false;
+        port.sendRetryReq();
+    }
+}
+
+void
+SimpleMemory::dequeue()
+{
+    assert(!packetQueue.empty());
+    DeferredPacket deferred_pkt = packetQueue.front();
+
+    retryResp = !port.sendTimingResp(deferred_pkt.pkt);
+
+    if (!retryResp) {
+        packetQueue.pop_front();
+
+        // if the queue is not empty, schedule the next dequeue event,
+        // otherwise signal that we are drained if we were asked to do so
+        if (!packetQueue.empty()) {
+            // if there were packets that got in-between then we
+            // already have an event scheduled, so use re-schedule
+            reschedule(dequeueEvent,
+                       std::max(packetQueue.front().tick, curTick()), true);
+        } else if (drainState() == DrainState::Draining) {
+            DPRINTF(Drain, "Draining of SimpleMemory complete\n");
+            signalDrainDone();
+        }
     }
 }

 Tick
-SimpleMemory::doAtomicAccess(PacketPtr pkt)
+SimpleMemory::getLatency() const
 {
-    access(pkt);
-    return calculateLatency(pkt);
+    return latency +
+        (latency_var ? random_mt.random<Tick>(0, latency_var) : 0);
 }

 void
-SimpleMemory::doFunctionalAccess(PacketPtr pkt)
+SimpleMemory::recvRespRetry()
 {
-    functionalAccess(pkt);
+    assert(retryResp);
+
+    dequeue();
 }

-SlavePort &
-SimpleMemory::getSlavePort(const std::string &if_name, int idx)
+BaseSlavePort &
+SimpleMemory::getSlavePort(const std::string &if_name, PortID idx)
 {
     if (if_name != "port") {
         return MemObject::getSlavePort(if_name, idx);
     } else {
-        if (idx >= static_cast<int>(ports.size())) {
-            fatal("SimpleMemory::getSlavePort: unknown index %d\n", idx);
-        }
-
-        return *ports[idx];
+        return port;
     }
 }

-unsigned int
-SimpleMemory::drain(Event *de)
+DrainState
+SimpleMemory::drain()
 {
-    int count = 0;
-    for (vector<MemoryPort*>::iterator p = ports.begin(); p != ports.end();
-         ++p) {
-        count += (*p)->drain(de);
+    if (!packetQueue.empty()) {
+        DPRINTF(Drain, "SimpleMemory Queue has requests, waiting to drain\n");
+        return DrainState::Draining;
+    } else {
+        return DrainState::Drained;
     }
-
-    if (count)
-        changeState(Draining);
-    else
-        changeState(Drained);
-    return count;
 }

 SimpleMemory::MemoryPort::MemoryPort(const std::string& _name,
                                      SimpleMemory& _memory)
-    : SimpleTimingPort(_name, &_memory), memory(_memory)
+    : SlavePort(_name, &_memory), memory(_memory)
 { }

 AddrRangeList
@@ -142,22 +267,25 @@ SimpleMemory::MemoryPort::getAddrRanges() const
 Tick
 SimpleMemory::MemoryPort::recvAtomic(PacketPtr pkt)
 {
-    return memory.doAtomicAccess(pkt);
+    return memory.recvAtomic(pkt);
 }

 void
 SimpleMemory::MemoryPort::recvFunctional(PacketPtr pkt)
 {
-    pkt->pushLabel(memory.name());
+    memory.recvFunctional(pkt);
+}

-    if (!queue.checkFunctional(pkt)) {
-        // Default implementation of SimpleTimingPort::recvFunctional()
-        // calls recvAtomic() and throws away the latency; we can save a
-        // little here by just not calculating the latency.
-        memory.doFunctionalAccess(pkt);
-    }
+bool
+SimpleMemory::MemoryPort::recvTimingReq(PacketPtr pkt)
+{
+    return memory.recvTimingReq(pkt);
+}

-    pkt->popLabel();
+void
+SimpleMemory::MemoryPort::recvRespRetry()
+{
+    memory.recvRespRetry();
+}

 SimpleMemory*
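
The change above replaces the old per-port SimpleTimingPort machinery with a single slave port, an explicit response queue, and a simple throttling model: a request keeps the controller busy for pkt->getSize() * bandwidth ticks (so bandwidth acts as a ticks-per-byte cost, as implied by that expression), and its response becomes ready after the fixed latency plus, when latency_var is non-zero, a random extra delay between 0 and latency_var. Below is a minimal, self-contained sketch of that timing model; it is not gem5 code, and the SimpleMemModel name, the occupancy()/accessLatency() helpers, the one-picosecond tick and the numeric values are all invented for illustration.

// Minimal sketch of the timing model introduced above -- not gem5 code.
// Assumptions: one tick per picosecond, 'bandwidth' as a ticks-per-byte
// cost, and a uniformly distributed random latency component.
#include <cstdint>
#include <iostream>
#include <random>

using Tick = uint64_t;

struct SimpleMemModel {
    Tick latency;       // fixed access latency in ticks
    Tick latency_var;   // upper bound of the random latency component
    double bandwidth;   // ticks needed per byte transferred
    std::mt19937_64 rng;

    // how long the controller stays busy with a request of 'bytes' bytes,
    // mirroring "Tick duration = pkt->getSize() * bandwidth" in the diff
    Tick occupancy(unsigned bytes) const {
        return static_cast<Tick>(bytes * bandwidth);
    }

    // per-request response latency, mirroring getLatency() in the diff
    Tick accessLatency() {
        if (latency_var == 0)
            return latency;
        std::uniform_int_distribution<Tick> extra(0, latency_var);
        return latency + extra(rng);
    }
};

int main() {
    // illustrative values only: 30 ns base latency, up to 5 ns of extra
    // variation, 12.8 GB/s peak bandwidth at 1 ps per tick
    SimpleMemModel mem{30000, 5000, 78.125};
    std::cout << "64-byte request: controller busy for " << mem.occupancy(64)
              << " ticks, response ready after " << mem.accessLatency()
              << " ticks\n";
    return 0;
}

With these illustrative numbers a 64-byte request occupies the model for 5000 ticks (64 * 78.125), which is what caps the short-term data rate, while the response itself is delayed by 30000 to 35000 ticks.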
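recvTimingReq() also insertion-sorts deferred responses by their send tick, with the restriction spelled out in its comment: a new response may move ahead of later responses to other addresses, but never ahead of an already-queued response to the same address, and, as the code is written, never ahead of the current queue head either. The following is a simplified sketch of that policy, again not gem5 code: DeferredResp and insertOrdered are invented names, and the empty-queue guard is an addition to keep the sketch self-contained.

// Simplified sketch of the response-queue ordering used in recvTimingReq()
// above -- not gem5 code; DeferredResp and insertOrdered are invented names.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <list>

using Tick = uint64_t;

struct DeferredResp {
    uint64_t addr;   // address the queued response belongs to
    Tick tick;       // earliest tick at which it may be sent
};

void insertOrdered(std::list<DeferredResp>& q, DeferredResp r)
{
    // guard for the empty-queue case so the sketch is self-contained
    if (q.empty()) {
        q.push_back(r);
        return;
    }
    // walk backwards from the tail while the new response is strictly
    // earlier and targets a different address, then insert *after* the
    // element the walk stopped at (as packetQueue.emplace(++i, ...) does)
    auto i = std::prev(q.end());
    while (i != q.begin() && r.tick < i->tick && i->addr != r.addr)
        --i;
    q.insert(std::next(i), r);
}

int main()
{
    std::list<DeferredResp> q;
    insertOrdered(q, {0x100, 50});
    insertOrdered(q, {0x100, 30});   // same address: must stay behind 0x100@50
    insertOrdered(q, {0x300, 20});   // overtakes 0x100@30, but not the head
    for (const auto& r : q)
        std::cout << "0x" << std::hex << r.addr << std::dec
                  << " ready at tick " << r.tick << "\n";
    return 0;
}

For this example the final order is 0x100@50, 0x300@20, 0x100@30: the response to 0x300 moves ahead of the second 0x100 response, while the two responses to 0x100 keep their arrival order.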