From 43114ad1dd678595a5a67c761f56508d3df3b4b7 Mon Sep 17 00:00:00 2001
From: Gabe Black
Date: Thu, 24 Dec 2020 06:48:21 -0800
Subject: [PATCH] dev: Use regular atomic accesses for DMA in bypass mode.

Atomic accesses are now accelerated with backdoor accesses and should
be at least as fast as functional accesses. This removes a dependency
on port proxies, and also stops the HDLCD from using functional
accesses.

Change-Id: I5e959288eb533d09cffa7b79938aa2f61e4aff7d
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/38720
Reviewed-by: Andreas Sandberg
Maintainer: Andreas Sandberg
Tested-by: kokoro
---
 src/dev/dma_device.cc | 14 ++++++++------
 src/dev/dma_device.hh |  6 ++----
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/dev/dma_device.cc b/src/dev/dma_device.cc
index bbdae7df0..9aebb7bd3 100644
--- a/src/dev/dma_device.cc
+++ b/src/dev/dma_device.cc
@@ -370,8 +370,8 @@ DmaReadFifo::DmaReadFifo(DmaPort &_port, size_t size,
                          unsigned max_pending,
                          Request::Flags flags)
     : maxReqSize(max_req_size), fifoSize(size),
-      reqFlags(flags), port(_port), proxy(port, port.sys->cacheLineSize()),
-      cacheLineSize(port.sys->cacheLineSize()), buffer(size)
+      reqFlags(flags), port(_port), cacheLineSize(port.sys->cacheLineSize()),
+      buffer(size)
 {
     freeRequests.resize(max_pending);
     for (auto &e : freeRequests)
@@ -465,7 +465,7 @@ DmaReadFifo::resumeFill()
     const bool old_eob(atEndOfBlock());
 
     if (port.sys->bypassCaches())
-        resumeFillFunctional();
+        resumeFillBypass();
     else
         resumeFillTiming();
 
@@ -474,7 +474,7 @@
 }
 
 void
-DmaReadFifo::resumeFillFunctional()
+DmaReadFifo::resumeFillBypass()
 {
     const size_t fifo_space = buffer.capacity() - buffer.size();
     if (fifo_space >= cacheLineSize || buffer.capacity() < cacheLineSize) {
@@ -483,11 +483,13 @@
         std::vector<uint8_t> tmp_buffer(xfer_size);
 
         assert(pendingRequests.empty());
-        DPRINTF(DMA, "KVM Bypassing startAddr=%#x xfer_size=%#x " \
+        DPRINTF(DMA, "Direct bypass startAddr=%#x xfer_size=%#x " \
                 "fifo_space=%#x block_remaining=%#x\n",
                 nextAddr, xfer_size, fifo_space, block_remaining);
 
-        proxy.readBlob(nextAddr, tmp_buffer.data(), xfer_size);
+        port.dmaAction(MemCmd::ReadReq, nextAddr, xfer_size, nullptr,
+                       tmp_buffer.data(), 0, reqFlags);
+
         buffer.write(tmp_buffer.begin(), xfer_size);
         nextAddr += xfer_size;
     }
diff --git a/src/dev/dma_device.hh b/src/dev/dma_device.hh
index 3904a082d..330be1ac2 100644
--- a/src/dev/dma_device.hh
+++ b/src/dev/dma_device.hh
@@ -49,7 +49,6 @@
 #include "base/circlebuf.hh"
 #include "dev/io_device.hh"
 #include "mem/backdoor.hh"
-#include "mem/port_proxy.hh"
 #include "params/DmaDevice.hh"
 #include "sim/drain.hh"
 #include "sim/system.hh"
@@ -508,7 +507,6 @@ class DmaReadFifo : public Drainable, public Serializable
     const Request::Flags reqFlags;
 
     DmaPort &port;
-    PortProxy proxy;
 
     const int cacheLineSize;
 
@@ -554,8 +552,8 @@ class DmaReadFifo : public Drainable, public Serializable
     /** Try to issue new DMA requests during normal execution*/
     void resumeFillTiming();
 
-    /** Try to bypass DMA requests in KVM execution mode */
-    void resumeFillFunctional();
+    /** Try to bypass DMA requests in non-caching mode */
+    void resumeFillBypass();
 
   private: // Internal state
     Fifo<uint8_t> buffer;
-- 
2.30.2
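
Usage note (not part of the patch): the bypass path above relies on the
fact that, when caches are bypassed, a port.dmaAction() call with a null
completion event finishes the access before it returns, so the data can
be copied into the FIFO right away. Below is a minimal sketch of that
pattern, using only the dmaAction() overload and the Fifo interface that
already appear in the diff; the fillOnce() helper itself is hypothetical
and only meant for illustration.

    // Hypothetical helper, assuming the gem5 dev/dma_device.hh and
    // base/circlebuf.hh context from the files touched by this patch.
    void
    fillOnce(DmaPort &port, Addr addr, size_t len, Request::Flags flags,
             Fifo<uint8_t> &fifo)
    {
        std::vector<uint8_t> tmp(len);
        // Null event, zero delay: in bypass (non-caching) mode the read
        // is issued as a regular atomic access and has completed by the
        // time dmaAction() returns.
        port.dmaAction(MemCmd::ReadReq, addr, len, nullptr,
                       tmp.data(), 0, flags);
        // Copy the freshly read bytes into the circular FIFO.
        fifo.write(tmp.begin(), len);
    }

In timing mode a real device would instead pass a completion event and
defer the fifo.write() until that event fires, which is what
resumeFillTiming() continues to do.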