/*
- * Copyright (c) 2012, 2015 ARM Limited
+ * Copyright (c) 2012, 2015, 2017 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
#include "base/chunk_generator.hh"
#include "debug/DMA.hh"
#include "debug/Drain.hh"
+#include "mem/port_proxy.hh"
#include "sim/system.hh"
DmaPort::DmaPort(MemObject *dev, System *s)
: MasterPort(dev->name() + ".dma", dev),
- device(dev), sys(s), masterId(s->getMasterId(dev->name())),
- sendEvent(this), pendingCount(0), inRetry(false)
+ // Request the master ID via the SimObject pointer rather than by
+ // name, and replace the dedicated event subclass with a generic
+ // EventFunctionWrapper that simply invokes sendDma().
+ device(dev), sys(s), masterId(s->getMasterId(dev)),
+ sendEvent([this]{ sendDma(); }, dev->name()),
+ pendingCount(0), inRetry(false)
{ }
void
delete state;
}
- // delete the request that we created and also the packet
- delete pkt->req;
+ // delete the packet
delete pkt;
// we might be drained at this point, if so signal the drain event
bool
DmaPort::recvTimingResp(PacketPtr pkt)
{
- // We shouldn't ever get a cacheable block in ownership state
+ // We shouldn't ever get a cacheable block in Modified state
assert(pkt->req->isUncacheable() ||
- !(pkt->memInhibitAsserted() && !pkt->sharedAsserted()));
+ !(pkt->cacheResponding() && !pkt->hasSharers()));
handleResp(pkt);
event ? event->scheduled() : -1);
for (ChunkGenerator gen(addr, size, sys->cacheLineSize());
!gen.done(); gen.next()) {
- req = new Request(gen.addr(), gen.size(), flag, masterId);
+
+ req = std::make_shared<Request>(
+ gen.addr(), gen.size(), flag, masterId);
+
req->taskId(ContextSwitchTaskId::DMA);
PacketPtr pkt = new Packet(req, cmd);
panic("Unknown memory mode.");
}
-BaseMasterPort &
-DmaDevice::getMasterPort(const std::string &if_name, PortID idx)
+// Unified port lookup: getMasterPort() is replaced by getPort(), which
+// returns the generic Port base class.  The "dma" interface resolves to
+// this device's DmaPort; anything else is delegated to PioDevice.
+Port &
+DmaDevice::getPort(const std::string &if_name, PortID idx)
{
if (if_name == "dma") {
return dmaPort;
}
- return PioDevice::getMasterPort(if_name, idx);
+ return PioDevice::getPort(if_name, idx);
}
return;
const bool old_eob(atEndOfBlock());
+
+ if (port.sys->bypassCaches())
+ resumeFillFunctional();
+ else
+ resumeFillTiming();
+
+ if (!old_eob && atEndOfBlock())
+ onEndOfBlock();
+}
+
+// Fill the FIFO using functional (instantaneous) accesses.  Used when
+// the system bypasses caches (see the bypassCaches() check in
+// resumeFill()), e.g. when running under KVM, where timing requests
+// are not serviced.  Data is read directly through the system's
+// physical memory port proxy instead of being sent as packets.
+void
+DmaReadFifo::resumeFillFunctional()
+{
+ const size_t fifo_space = buffer.capacity() - buffer.size();
+ // Only refill once at least a cache line of space is free (or the
+ // FIFO is smaller than a cache line), to avoid many tiny reads.
+ const size_t kvm_watermark = port.sys->cacheLineSize();
+ if (fifo_space >= kvm_watermark || buffer.capacity() < kvm_watermark) {
+ const size_t block_remaining = endAddr - nextAddr;
+ const size_t xfer_size = std::min(fifo_space, block_remaining);
+ std::vector<uint8_t> tmp_buffer(xfer_size);
+
+ // No timing requests may be in flight while bypassing caches.
+ assert(pendingRequests.empty());
+ DPRINTF(DMA, "KVM Bypassing startAddr=%#x xfer_size=%#x " \
+ "fifo_space=%#x block_remaining=%#x\n",
+ nextAddr, xfer_size, fifo_space, block_remaining);
+
+ // Functional read straight from physical memory, then stage the
+ // bytes into the FIFO and advance the fill pointer.
+ port.sys->physProxy.readBlob(nextAddr, tmp_buffer.data(), xfer_size);
+ buffer.write(tmp_buffer.begin(), xfer_size);
+ nextAddr += xfer_size;
+ }
+}
+
+void
+DmaReadFifo::resumeFillTiming()
+{
size_t size_pending(0);
for (auto &e : pendingRequests)
size_pending += e->requestSize();
pendingRequests.emplace_back(std::move(event));
}
-
- // EOB can be set before a call to dmaDone() if in-flight accesses
- // have been canceled.
- if (!old_eob && atEndOfBlock())
- onEndOfBlock();
}
void
handlePending();
resumeFill();
- if (!old_active && isActive())
+ if (old_active && !isActive())
onIdle();
}
signalDrainDone();
}
-
-
DrainState
DmaReadFifo::drain()
{