/*
 * Copyright (c) 2012-2013, 2015, 2017 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __DEV_DMA_DEVICE_HH__
#define __DEV_DMA_DEVICE_HH__

#include <deque>
#include <memory>
#include <vector>

#include "base/circlebuf.hh"
#include "dev/io_device.hh"
#include "params/DmaDevice.hh"
#include "sim/drain.hh"
#include "sim/system.hh"
class DmaPort : public MasterPort, public Drainable
{
  private:
    /**
     * Take the first packet of the transmit list and attempt to send
     * it as a timing request. If it is successful, schedule the
     * sending of the next packet, otherwise remember that we are
     * waiting for a retry.
     */
    void trySendTimingReq();
    /**
     * For timing, attempt to send the first item on the transmit
     * list, and if it is successful and there are more packets
     * waiting, then schedule the sending of the next packet. For
     * atomic, simply send and process everything on the transmit
     * list.
     */
    void sendDma();
    /**
     * Handle a response packet by updating the corresponding DMA
     * request state to reflect the bytes received, and also update
     * the pending request counter. If the DMA request that this
     * packet is part of is complete, then signal the completion event
     * if present, potentially with a delay added to it.
     *
     * @param pkt Response packet to handle
     * @param delay Additional delay for scheduling the completion event
     */
    void handleResp(PacketPtr pkt, Tick delay = 0);
    struct DmaReqState : public Packet::SenderState
    {
        /** Event to call on the device when this transaction (all
         * packets) completes. */
        Event *completionEvent;

        /** Total number of bytes that this transaction involves. */
        const Addr totBytes;

        /** Number of bytes that have been acked for this transaction. */
        Addr numBytes;

        /** Amount to delay completion of the DMA by. */
        const Tick delay;

        DmaReqState(Event *ce, Addr tb, Tick _delay)
            : completionEvent(ce), totBytes(tb), numBytes(0), delay(_delay)
        {}
    };
  public:
    /** The device that owns this port. */
    MemObject *const device;

    /** The system that device/port are in. This is used to select
     * which mode we are currently operating in. */
    System *const sys;

    /** Id for all requests. */
    const MasterID masterId;
  protected:
    /** Use a deque as we never do any insertion or removal in the middle. */
    std::deque<PacketPtr> transmitList;

    /** Event used to schedule a future sending from the transmit list. */
    EventFunctionWrapper sendEvent;

    /** Number of outstanding packets the DMA port has. */
    uint32_t pendingCount;

    /** If the port is currently waiting for a retry before it can
     * send whatever it is that it's sending. */
    bool inRetry;
    bool recvTimingResp(PacketPtr pkt) override;
    void recvReqRetry() override;

    void queueDma(PacketPtr pkt);
  public:
    DmaPort(MemObject *dev, System *s);

    RequestPtr dmaAction(Packet::Command cmd, Addr addr, int size, Event *event,
                         uint8_t *data, Tick delay, Request::Flags flag = 0);

    bool dmaPending() const { return pendingCount > 0; }

    DrainState drain() override;
};
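/**
 * A minimal usage sketch for dmaAction(), assuming a device model that
 * already owns a DmaPort and wants to be notified when a read
 * completes. MyDevice, readDone and buf are hypothetical; only the
 * dmaAction() call itself mirrors the declaration above.
 *
 * \code{.cpp}
 *     void
 *     MyDevice::fetchDescriptor(Addr desc_addr)
 *     {
 *         // buf must stay valid until readDone fires; the final
 *         // argument is an extra completion delay of zero ticks.
 *         dmaPort.dmaAction(MemCmd::ReadReq, desc_addr, 64,
 *                           &readDone, buf, 0);
 *     }
 * \endcode
 */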
class DmaDevice : public PioDevice
{
  protected:
    DmaPort dmaPort;

  public:
    typedef DmaDeviceParams Params;
    DmaDevice(const Params *p);
    virtual ~DmaDevice() { }
    void dmaWrite(Addr addr, int size, Event *event, uint8_t *data,
                  Tick delay = 0)
    {
        dmaPort.dmaAction(MemCmd::WriteReq, addr, size, event, data, delay);
    }

    void dmaRead(Addr addr, int size, Event *event, uint8_t *data,
                 Tick delay = 0)
    {
        dmaPort.dmaAction(MemCmd::ReadReq, addr, size, event, data, delay);
    }
    bool dmaPending() const { return dmaPort.dmaPending(); }

    void init() override;

    unsigned int cacheBlockSize() const { return sys->cacheLineSize(); }

    BaseMasterPort &getMasterPort(const std::string &if_name,
                                  PortID idx = InvalidPortID) override;
};
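/**
 * A minimal sketch of how a derived device typically drives the
 * convenience wrappers above, assuming a hypothetical MyDmaDevice with
 * its own completion event. Everything except dmaRead() itself is
 * illustrative.
 *
 * \code{.cpp}
 *     class MyDmaDevice : public DmaDevice
 *     {
 *         EventFunctionWrapper readDone;
 *         uint8_t buffer[64];
 *
 *         void processData();
 *
 *       public:
 *         MyDmaDevice(const Params *p)
 *             : DmaDevice(p),
 *               readDone([this]{ processData(); }, name())
 *         {}
 *
 *         void startRead(Addr addr)
 *         {
 *             // buffer must remain valid until readDone fires.
 *             dmaRead(addr, sizeof(buffer), &readDone, buffer);
 *         }
 *     };
 * \endcode
 */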
/**
 * DMA callback class.
 *
 * Allows one to register for a callback event after a sequence of
 * (potentially non-contiguous) DMA transfers on a DmaPort completes.
 * Derived classes must implement the process() method and use
 * getChunkEvent() to allocate a callback event for each participating
 * DMA.
 */
class DmaCallback : public Drainable
{
  public:
    virtual const std::string name() const { return "DmaCallback"; }
    /**
     * DmaPort ensures that all outstanding DMA accesses have completed
     * before it finishes draining. However, DmaChunkEvents scheduled
     * with a delay might still be sitting on the event queue. Therefore,
     * draining is not complete until count is 0, which ensures that all
     * outstanding DmaChunkEvents associated with this DmaCallback have
     * fired.
     */
    DrainState drain() override
    {
        return count ? DrainState::Draining : DrainState::Drained;
    }
  protected:
    /** Number of chunk events still outstanding for this callback. */
    int count;

    DmaCallback()
        : count(0)
    { }

    virtual ~DmaCallback() { }

    /**
     * Callback function invoked on completion of all chunks.
     */
    virtual void process() = 0;
  private:
    /**
     * Called by the DMA engine completion event on each chunk completion.
     * Since the object may delete itself here, callers should not use
     * the object pointer after calling this function.
     */
    void chunkComplete()
    {
        if (--count == 0) {
            process();
            // Need to notify DrainManager that this object is finished
            // draining, even though it is immediately deleted.
            signalDrainDone();
            delete this;
        }
    }
  public:
    /**
     * Request a chunk event. Chunk events should be provided to each DMA
     * request that wishes to participate in this DmaCallback.
     */
    Event *getChunkEvent()
    {
        ++count;
        return new EventFunctionWrapper([this]{ chunkComplete(); }, name(),
                                        true);
    }
};
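/**
 * A minimal sketch of a DmaCallback user, assuming a hypothetical
 * scatter-gather transfer made up of several dmaAction() calls. The
 * callback deletes itself after process() runs, as described above;
 * TransferDone, addr0/addr1 and buf0/buf1 are illustrative.
 *
 * \code{.cpp}
 *     class TransferDone : public DmaCallback
 *     {
 *         void process() override
 *         {
 *             // All chunks have completed; notify the device model.
 *         }
 *     };
 *
 *     TransferDone *cb = new TransferDone();
 *     // One chunk event per participating DMA request.
 *     port.dmaAction(MemCmd::ReadReq, addr0, 64, cb->getChunkEvent(),
 *                    buf0, 0);
 *     port.dmaAction(MemCmd::ReadReq, addr1, 64, cb->getChunkEvent(),
 *                    buf1, 0);
 * \endcode
 */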
/**
 * Buffered DMA engine helper class
 *
 * This class implements a simple DMA engine that feeds a FIFO
 * buffer. The size of the buffer, the maximum number of pending
 * requests and the maximum request size are all set when the engine
 * is instantiated.
 *
 * An <i>asynchronous</i> transfer of a <i>block</i> of data
 * (designated by a start address and a size) is started by calling
 * the startFill() method. The DMA engine will aggressively try to
 * keep the internal FIFO full. As soon as there is room in the FIFO
 * for more data <i>and</i> there are free request slots, a new fill
 * will be started.
 *
 * Data in the FIFO can be read back using the get() and tryGet()
 * methods. Both request a block of data from the FIFO. However, get()
 * panics if the block cannot be satisfied, while tryGet() simply
 * returns false. The latter call makes it possible to implement
 * custom buffer underrun handling.
 *
 * A simple use case would be something like this:
 * \code{.cpp}
 *     // Create a DMA engine with a 1KiB buffer. Issue up to 8 concurrent
 *     // uncacheable 64 byte (maximum) requests.
 *     DmaReadFifo *dma = new DmaReadFifo(port, 1024, 64, 8,
 *                                        Request::UNCACHEABLE);
 *
 *     // Start copying 4KiB of data from 0xFF000000.
 *     dma->startFill(0xFF000000, 0x1000);
 *
 *     // Some time later, when there is data in the FIFO.
 *     uint8_t data[8];
 *     dma->get(data, sizeof(data));
 * \endcode
 *
 * The DMA engine allows new blocks to be requested as soon as the
 * last request for a block has been sent (i.e., there is no need to
 * wait for pending requests to complete). This can be queried with
 * the atEndOfBlock() method and more advanced implementations may
 * override the onEndOfBlock() callback.
 */
class DmaReadFifo : public Drainable, public Serializable
{
  public:
    DmaReadFifo(DmaPort &port, size_t size,
                unsigned max_req_size,
                unsigned max_pending,
                Request::Flags flags = 0);

    ~DmaReadFifo();
  public: // Serializable
    void serialize(CheckpointOut &cp) const override;
    void unserialize(CheckpointIn &cp) override;

  public: // Drainable
    DrainState drain() override;
  public: // FIFO access
    /**
     * @{
     * @name FIFO access
     */

    /**
     * Try to read data from the FIFO.
     *
     * This method reads len bytes of data from the FIFO and stores
     * them in the memory location pointed to by dst. The method
     * fails, and no data is written to the buffer, if the FIFO
     * doesn't contain enough data to satisfy the request.
     *
     * @param dst Pointer to a destination buffer.
     * @param len Amount of data to read.
     * @return true on success, false otherwise.
     */
    bool tryGet(uint8_t *dst, size_t len);

    template<typename T>
    bool tryGet(T &value)
    {
        return tryGet(reinterpret_cast<uint8_t *>(&value), sizeof(T));
    }
    /**
     * Read data from the FIFO and panic on failure.
     *
     * @see tryGet()
     *
     * @param dst Pointer to a destination buffer.
     * @param len Amount of data to read.
     */
    void get(uint8_t *dst, size_t len);

    template<typename T>
    T get()
    {
        T value;
        get(reinterpret_cast<uint8_t *>(&value), sizeof(T));
        return value;
    }
    /** Get the amount of data stored in the FIFO. */
    size_t size() const { return buffer.size(); }
    /** Flush the FIFO. */
    void flush() { buffer.flush(); }

    /** @} */
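    /**
     * An illustrative sketch of the typed accessors above; the
     * Descriptor struct and the dma pointer are hypothetical.
     *
     * \code{.cpp}
     *     struct Descriptor { uint32_t addr; uint32_t len; };
     *
     *     Descriptor desc;
     *     if (dma->tryGet(desc)) {
     *         // desc now holds the next sizeof(desc) bytes from the
     *         // FIFO.
     *     } else {
     *         // Buffer underrun: not enough data buffered yet.
     *     }
     * \endcode
     */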
  public: // FIFO fill control
    /**
     * @{
     * @name FIFO fill control
     */

    /**
     * Start filling the FIFO.
     *
     * @warning It's considered an error to call start on an active DMA
     * engine unless the last request from the active block has been
     * sent (i.e., atEndOfBlock() is true).
     *
     * @param start Physical address to copy from.
     * @param size Size of the block to copy.
     */
    void startFill(Addr start, size_t size);
    /**
     * Stop the DMA engine.
     *
     * Stop filling the FIFO and ignore incoming responses for pending
     * requests. The onEndOfBlock() callback will not be called after
     * this method has been invoked. However, once the last response
     * has been received, the onIdle() callback will still be called.
     */
    void stopFill();
    /**
     * Has the DMA engine sent out the last request for the active
     * block?
     */
    bool atEndOfBlock() const {
        return nextAddr == endAddr;
    }

    /**
     * Is the DMA engine active (i.e., are there still in-flight
     * accesses)?
     */
    bool isActive() const {
        return !(pendingRequests.empty() && atEndOfBlock());
    }

    /** @} */
  protected: // Callbacks
    /**
     * @{
     * @name Callbacks
     */

    /**
     * End of block callback
     *
     * This callback is called <i>once</i> after the last access in a
     * block has been sent. It is legal for a derived class to call
     * startFill() from this method to initiate a transfer.
     */
    virtual void onEndOfBlock() {};
    /**
     * Last response received callback
     *
     * This callback is called when the DMA engine becomes idle (i.e.,
     * there are no pending requests).
     *
     * It is possible for a DMA engine to reach the end of a block and
     * become idle at the same tick. In such a case, the onEndOfBlock()
     * callback will be called first. This callback will <i>NOT</i> be
     * called if that callback initiates a new DMA transfer.
     */
    virtual void onIdle() {};

    /** @} */
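    /**
     * A minimal sketch of the callback hooks in use: a hypothetical
     * engine that streams a ring of fixed-size blocks by chaining
     * startFill() calls from onEndOfBlock(), which the documentation
     * above explicitly permits. RingDma and its members are
     * illustrative only.
     *
     * \code{.cpp}
     *     class RingDma : public DmaReadFifo
     *     {
     *         using DmaReadFifo::DmaReadFifo;
     *
     *         Addr base = 0, offset = 0;
     *         size_t ringSize = 0x4000, blockSize = 0x400;
     *
     *         void onEndOfBlock() override
     *         {
     *             // Queue the next block as soon as the last request
     *             // of the current one has been sent.
     *             offset = (offset + blockSize) % ringSize;
     *             startFill(base + offset, blockSize);
     *         }
     *     };
     * \endcode
     */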
  private: // Configuration
    /** Maximum request size in bytes */
    const Addr maxReqSize;
    /** Maximum FIFO size in bytes */
    const size_t fifoSize;
    /** Request flags */
    const Request::Flags reqFlags;

    DmaPort &port;
  private:
    class DmaDoneEvent : public Event
    {
      public:
        DmaDoneEvent(DmaReadFifo *_parent, size_t max_size);

        void kill();
        void cancel();
        bool canceled() const { return _canceled; }
        void reset(size_t size);
        void process() override;

        bool done() const { return _done; }
        size_t requestSize() const { return _requestSize; }
        const uint8_t *data() const { return _data.data(); }
        uint8_t *data() { return _data.data(); }

      private:
        DmaReadFifo *parent;
        bool _done;
        bool _canceled;
        size_t _requestSize;
        std::vector<uint8_t> _data;
    };

    typedef std::unique_ptr<DmaDoneEvent> DmaDoneEventUPtr;
    /**
     * DMA request done, handle incoming data and issue new
     * request.
     */
    void dmaDone();

    /** Handle pending requests that have been flagged as done. */
    void handlePending();

    /** Try to issue new DMA requests or bypass DMA requests. */
    void resumeFill();

    /** Try to issue new DMA requests during normal execution. */
    void resumeFillTiming();

    /** Try to bypass DMA requests in KVM execution mode. */
    void resumeFillFunctional();
  private: // Internal state
    Fifo<uint8_t> buffer;

    Addr nextAddr;
    Addr endAddr;

    std::deque<DmaDoneEventUPtr> pendingRequests;
    std::deque<DmaDoneEventUPtr> freeRequests;
};
#endif // __DEV_DMA_DEVICE_HH__