/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2008 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* @file
 * Device model for Intel's I/O AT DMA copy engine.
 */
#include "dev/pci/copy_engine.hh"

#include <cstring>

#include "base/cp_annotate.hh"
#include "base/trace.hh"
#include "debug/DMACopyEngine.hh"
#include "debug/Drain.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/CopyEngine.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace CopyEngineReg;
63 CopyEngine::CopyEngine(const Params
*p
)
66 // All Reg regs are initialized to 0 by default
67 regs
.chanCount
= p
->ChanCnt
;
68 regs
.xferCap
= findMsbSet(p
->XferCap
);
71 if (regs
.chanCount
> 64)
72 fatal("CopyEngine interface doesn't support more than 64 DMA engines\n");
74 for (int x
= 0; x
< regs
.chanCount
; x
++) {
75 CopyEngineChannel
*ch
= new CopyEngineChannel(this, x
);
81 CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine
*_ce
, int cid
)
82 : cePort(_ce
, _ce
->sys
),
83 ce(_ce
), channelId(cid
), busy(false), underReset(false),
84 refreshNext(false), latBeforeBegin(ce
->params()->latBeforeBegin
),
85 latAfterCompletion(ce
->params()->latAfterCompletion
),
86 completionDataReg(0), nextState(Idle
),
87 fetchCompleteEvent([this]{ fetchDescComplete(); }, name()),
88 addrCompleteEvent([this]{ fetchAddrComplete(); }, name()),
89 readCompleteEvent([this]{ readCopyBytesComplete(); }, name()),
90 writeCompleteEvent([this]{ writeCopyBytesComplete(); }, name()),
91 statusCompleteEvent([this]{ writeStatusComplete(); }, name())
94 cr
.status
.dma_transfer_status(3);
96 cr
.completionAddr
= 0;
98 curDmaDesc
= new DmaDesc
;
99 memset(curDmaDesc
, 0, sizeof(DmaDesc
));
100 copyBuffer
= new uint8_t[ce
->params()->XferCap
];
103 CopyEngine::~CopyEngine()
105 for (int x
= 0; x
< chan
.size(); x
++) {
110 CopyEngine::CopyEngineChannel::~CopyEngineChannel()
113 delete [] copyBuffer
;
117 CopyEngine::getPort(const std::string
&if_name
, PortID idx
)
119 if (if_name
!= "dma") {
120 // pass it along to our super class
121 return PciDevice::getPort(if_name
, idx
);
123 if (idx
>= static_cast<int>(chan
.size())) {
124 panic("CopyEngine::getPort: unknown index %d\n", idx
);
127 return chan
[idx
]->getPort();
133 CopyEngine::CopyEngineChannel::getPort()
139 CopyEngine::CopyEngineChannel::recvCommand()
141 if (cr
.command
.start_dma()) {
143 cr
.status
.dma_transfer_status(0);
144 nextState
= DescriptorFetch
;
145 fetchAddress
= cr
.descChainAddr
;
146 if (ce
->drainState() == DrainState::Running
)
147 fetchDescriptor(cr
.descChainAddr
);
148 } else if (cr
.command
.append_dma()) {
150 nextState
= AddressFetch
;
151 if (ce
->drainState() == DrainState::Running
)
152 fetchNextAddr(lastDescriptorAddr
);
155 } else if (cr
.command
.reset_dma()) {
159 cr
.status
.dma_transfer_status(3);
162 } else if (cr
.command
.resume_dma() || cr
.command
.abort_dma() ||
163 cr
.command
.suspend_dma())
164 panic("Resume, Abort, and Suspend are not supported\n");
169 CopyEngine::read(PacketPtr pkt
)
174 if (!getBAR(pkt
->getAddr(), bar
, daddr
))
175 panic("Invalid PCI memory access to unmapped memory.\n");
177 // Only Memory register BAR is allowed
180 int size
= pkt
->getSize();
181 if (size
!= sizeof(uint64_t) && size
!= sizeof(uint32_t) &&
182 size
!= sizeof(uint16_t) && size
!= sizeof(uint8_t)) {
183 panic("Unknown size for MMIO access: %d\n", pkt
->getSize());
186 DPRINTF(DMACopyEngine
, "Read device register %#X size: %d\n", daddr
, size
);
189 /// Handle read of register here
195 assert(size
== sizeof(regs
.chanCount
));
196 pkt
->setLE
<uint8_t>(regs
.chanCount
);
199 assert(size
== sizeof(regs
.xferCap
));
200 pkt
->setLE
<uint8_t>(regs
.xferCap
);
203 assert(size
== sizeof(uint8_t));
204 pkt
->setLE
<uint8_t>(regs
.intrctrl());
205 regs
.intrctrl
.master_int_enable(0);
208 assert(size
== sizeof(regs
.attnStatus
));
209 pkt
->setLE
<uint32_t>(regs
.attnStatus
);
213 panic("Read request to unknown register number: %#x\n", daddr
);
215 pkt
->makeAtomicResponse();
220 // Find which channel we're accessing
223 while (daddr
>= 0x80) {
228 if (chanid
>= regs
.chanCount
)
229 panic("Access to channel %d (device only configured for %d channels)",
230 chanid
, regs
.chanCount
);
233 /// Channel registers are handled here
235 chan
[chanid
]->channelRead(pkt
, daddr
, size
);
237 pkt
->makeAtomicResponse();
242 CopyEngine::CopyEngineChannel::channelRead(Packet
*pkt
, Addr daddr
, int size
)
246 assert(size
== sizeof(uint16_t));
247 pkt
->setLE
<uint16_t>(cr
.ctrl());
251 assert(size
== sizeof(uint64_t));
252 pkt
->setLE
<uint64_t>(cr
.status() | (busy
? 0 : 1));
255 assert(size
== sizeof(uint64_t) || size
== sizeof(uint32_t));
256 if (size
== sizeof(uint64_t))
257 pkt
->setLE
<uint64_t>(cr
.descChainAddr
);
259 pkt
->setLE
<uint32_t>(bits(cr
.descChainAddr
,0,31));
261 case CHAN_CHAINADDR_HIGH
:
262 assert(size
== sizeof(uint32_t));
263 pkt
->setLE
<uint32_t>(bits(cr
.descChainAddr
,32,63));
266 assert(size
== sizeof(uint8_t));
267 pkt
->setLE
<uint32_t>(cr
.command());
270 assert(size
== sizeof(uint64_t) || size
== sizeof(uint32_t));
271 if (size
== sizeof(uint64_t))
272 pkt
->setLE
<uint64_t>(cr
.completionAddr
);
274 pkt
->setLE
<uint32_t>(bits(cr
.completionAddr
,0,31));
276 case CHAN_CMPLNADDR_HIGH
:
277 assert(size
== sizeof(uint32_t));
278 pkt
->setLE
<uint32_t>(bits(cr
.completionAddr
,32,63));
281 assert(size
== sizeof(uint32_t));
282 pkt
->setLE
<uint32_t>(cr
.error());
285 panic("Read request to unknown channel register number: (%d)%#x\n",
292 CopyEngine::write(PacketPtr pkt
)
298 if (!getBAR(pkt
->getAddr(), bar
, daddr
))
299 panic("Invalid PCI memory access to unmapped memory.\n");
301 // Only Memory register BAR is allowed
304 int size
= pkt
->getSize();
307 /// Handle write of register here
310 if (size
== sizeof(uint64_t)) {
311 uint64_t val M5_VAR_USED
= pkt
->getLE
<uint64_t>();
312 DPRINTF(DMACopyEngine
, "Wrote device register %#X value %#X\n",
314 } else if (size
== sizeof(uint32_t)) {
315 uint32_t val M5_VAR_USED
= pkt
->getLE
<uint32_t>();
316 DPRINTF(DMACopyEngine
, "Wrote device register %#X value %#X\n",
318 } else if (size
== sizeof(uint16_t)) {
319 uint16_t val M5_VAR_USED
= pkt
->getLE
<uint16_t>();
320 DPRINTF(DMACopyEngine
, "Wrote device register %#X value %#X\n",
322 } else if (size
== sizeof(uint8_t)) {
323 uint8_t val M5_VAR_USED
= pkt
->getLE
<uint8_t>();
324 DPRINTF(DMACopyEngine
, "Wrote device register %#X value %#X\n",
327 panic("Unknown size for MMIO access: %d\n", size
);
335 DPRINTF(DMACopyEngine
, "Warning, ignorning write to register %x\n",
339 regs
.intrctrl
.master_int_enable(bits(pkt
->getLE
<uint8_t>(), 0, 1));
342 panic("Read request to unknown register number: %#x\n", daddr
);
344 pkt
->makeAtomicResponse();
348 // Find which channel we're accessing
351 while (daddr
>= 0x80) {
356 if (chanid
>= regs
.chanCount
)
357 panic("Access to channel %d (device only configured for %d channels)",
358 chanid
, regs
.chanCount
);
361 /// Channel registers are handled here
363 chan
[chanid
]->channelWrite(pkt
, daddr
, size
);
365 pkt
->makeAtomicResponse();
370 CopyEngine::CopyEngineChannel::channelWrite(Packet
*pkt
, Addr daddr
, int size
)
374 assert(size
== sizeof(uint16_t));
376 old_int_disable
= cr
.ctrl
.interrupt_disable();
377 cr
.ctrl(pkt
->getLE
<uint16_t>());
378 if (cr
.ctrl
.interrupt_disable())
379 cr
.ctrl
.interrupt_disable(0);
381 cr
.ctrl
.interrupt_disable(old_int_disable
);
384 assert(size
== sizeof(uint64_t));
385 DPRINTF(DMACopyEngine
, "Warning, ignorning write to register %x\n",
389 assert(size
== sizeof(uint64_t) || size
== sizeof(uint32_t));
390 if (size
== sizeof(uint64_t))
391 cr
.descChainAddr
= pkt
->getLE
<uint64_t>();
393 cr
.descChainAddr
= (uint64_t)pkt
->getLE
<uint32_t>() |
394 (cr
.descChainAddr
& ~mask(32));
395 DPRINTF(DMACopyEngine
, "Chain Address %x\n", cr
.descChainAddr
);
397 case CHAN_CHAINADDR_HIGH
:
398 assert(size
== sizeof(uint32_t));
399 cr
.descChainAddr
= ((uint64_t)pkt
->getLE
<uint32_t>() << 32) |
400 (cr
.descChainAddr
& mask(32));
401 DPRINTF(DMACopyEngine
, "Chain Address %x\n", cr
.descChainAddr
);
404 assert(size
== sizeof(uint8_t));
405 cr
.command(pkt
->getLE
<uint8_t>());
409 assert(size
== sizeof(uint64_t) || size
== sizeof(uint32_t));
410 if (size
== sizeof(uint64_t))
411 cr
.completionAddr
= pkt
->getLE
<uint64_t>();
413 cr
.completionAddr
= pkt
->getLE
<uint32_t>() |
414 (cr
.completionAddr
& ~mask(32));
416 case CHAN_CMPLNADDR_HIGH
:
417 assert(size
== sizeof(uint32_t));
418 cr
.completionAddr
= ((uint64_t)pkt
->getLE
<uint32_t>() <<32) |
419 (cr
.completionAddr
& mask(32));
422 assert(size
== sizeof(uint32_t));
423 cr
.error(~pkt
->getLE
<uint32_t>() & cr
.error());
426 panic("Read request to unknown channel register number: (%d)%#x\n",
432 CopyEngine::regStats()
434 PciDevice::regStats();
436 using namespace Stats
;
438 .init(regs
.chanCount
)
439 .name(name() + ".bytes_copied")
440 .desc("Number of bytes copied by each engine")
444 .init(regs
.chanCount
)
445 .name(name() + ".copies_processed")
446 .desc("Number of copies processed by each engine")
452 CopyEngine::CopyEngineChannel::fetchDescriptor(Addr address
)
455 anBegin("FetchDescriptor");
456 DPRINTF(DMACopyEngine
, "Reading descriptor from at memory location %#x(%#x)\n",
457 address
, ce
->pciToDma(address
));
461 DPRINTF(DMACopyEngine
, "dmaAction: %#x, %d bytes, to addr %#x\n",
462 ce
->pciToDma(address
), sizeof(DmaDesc
), curDmaDesc
);
464 cePort
.dmaAction(MemCmd::ReadReq
, ce
->pciToDma(address
),
465 sizeof(DmaDesc
), &fetchCompleteEvent
,
466 (uint8_t*)curDmaDesc
, latBeforeBegin
);
467 lastDescriptorAddr
= address
;
471 CopyEngine::CopyEngineChannel::fetchDescComplete()
473 DPRINTF(DMACopyEngine
, "Read of descriptor complete\n");
475 if ((curDmaDesc
->command
& DESC_CTRL_NULL
)) {
476 DPRINTF(DMACopyEngine
, "Got NULL descriptor, skipping\n");
477 assert(!(curDmaDesc
->command
& DESC_CTRL_CP_STS
));
478 if (curDmaDesc
->command
& DESC_CTRL_CP_STS
) {
479 panic("Shouldn't be able to get here\n");
480 nextState
= CompletionWrite
;
481 if (inDrain()) return;
482 writeCompletionStatus();
493 if (curDmaDesc
->command
& ~DESC_CTRL_CP_STS
)
494 panic("Descriptor has flag other that completion status set\n");
497 if (inDrain()) return;
502 CopyEngine::CopyEngineChannel::readCopyBytes()
504 anBegin("ReadCopyBytes");
505 DPRINTF(DMACopyEngine
, "Reading %d bytes from buffer to memory location %#x(%#x)\n",
506 curDmaDesc
->len
, curDmaDesc
->dest
,
507 ce
->pciToDma(curDmaDesc
->src
));
508 cePort
.dmaAction(MemCmd::ReadReq
, ce
->pciToDma(curDmaDesc
->src
),
509 curDmaDesc
->len
, &readCompleteEvent
, copyBuffer
, 0);
513 CopyEngine::CopyEngineChannel::readCopyBytesComplete()
515 DPRINTF(DMACopyEngine
, "Read of bytes to copy complete\n");
517 nextState
= DMAWrite
;
518 if (inDrain()) return;
523 CopyEngine::CopyEngineChannel::writeCopyBytes()
525 anBegin("WriteCopyBytes");
526 DPRINTF(DMACopyEngine
, "Writing %d bytes from buffer to memory location %#x(%#x)\n",
527 curDmaDesc
->len
, curDmaDesc
->dest
,
528 ce
->pciToDma(curDmaDesc
->dest
));
530 cePort
.dmaAction(MemCmd::WriteReq
, ce
->pciToDma(curDmaDesc
->dest
),
531 curDmaDesc
->len
, &writeCompleteEvent
, copyBuffer
, 0);
533 ce
->bytesCopied
[channelId
] += curDmaDesc
->len
;
534 ce
->copiesProcessed
[channelId
]++;
538 CopyEngine::CopyEngineChannel::writeCopyBytesComplete()
540 DPRINTF(DMACopyEngine
, "Write of bytes to copy complete user1: %#x\n",
543 cr
.status
.compl_desc_addr(lastDescriptorAddr
>> 6);
544 completionDataReg
= cr
.status() | 1;
546 anQ("DMAUsedDescQ", channelId
, 1);
547 anQ("AppRecvQ", curDmaDesc
->user1
, curDmaDesc
->len
);
548 if (curDmaDesc
->command
& DESC_CTRL_CP_STS
) {
549 nextState
= CompletionWrite
;
550 if (inDrain()) return;
551 writeCompletionStatus();
555 continueProcessing();
559 CopyEngine::CopyEngineChannel::continueProcessing()
573 if (curDmaDesc
->next
) {
574 nextState
= DescriptorFetch
;
575 fetchAddress
= curDmaDesc
->next
;
576 if (inDrain()) return;
577 fetchDescriptor(curDmaDesc
->next
);
578 } else if (refreshNext
) {
579 nextState
= AddressFetch
;
581 if (inDrain()) return;
582 fetchNextAddr(lastDescriptorAddr
);
592 CopyEngine::CopyEngineChannel::writeCompletionStatus()
594 anBegin("WriteCompletionStatus");
595 DPRINTF(DMACopyEngine
, "Writing completion status %#x to address %#x(%#x)\n",
596 completionDataReg
, cr
.completionAddr
,
597 ce
->pciToDma(cr
.completionAddr
));
599 cePort
.dmaAction(MemCmd::WriteReq
,
600 ce
->pciToDma(cr
.completionAddr
),
601 sizeof(completionDataReg
), &statusCompleteEvent
,
602 (uint8_t*)&completionDataReg
, latAfterCompletion
);
606 CopyEngine::CopyEngineChannel::writeStatusComplete()
608 DPRINTF(DMACopyEngine
, "Writing completion status complete\n");
609 continueProcessing();
613 CopyEngine::CopyEngineChannel::fetchNextAddr(Addr address
)
615 anBegin("FetchNextAddr");
616 DPRINTF(DMACopyEngine
, "Fetching next address...\n");
618 cePort
.dmaAction(MemCmd::ReadReq
,
619 ce
->pciToDma(address
+ offsetof(DmaDesc
, next
)),
620 sizeof(Addr
), &addrCompleteEvent
,
621 (uint8_t*)curDmaDesc
+ offsetof(DmaDesc
, next
), 0);
625 CopyEngine::CopyEngineChannel::fetchAddrComplete()
627 DPRINTF(DMACopyEngine
, "Fetching next address complete: %#x\n",
629 if (!curDmaDesc
->next
) {
630 DPRINTF(DMACopyEngine
, "Got NULL descriptor, nothing more to do\n");
638 nextState
= DescriptorFetch
;
639 fetchAddress
= curDmaDesc
->next
;
640 if (inDrain()) return;
641 fetchDescriptor(curDmaDesc
->next
);
645 CopyEngine::CopyEngineChannel::inDrain()
647 if (drainState() == DrainState::Draining
) {
648 DPRINTF(Drain
, "CopyEngine done draining, processing drain event\n");
652 return ce
->drainState() != DrainState::Running
;
656 CopyEngine::CopyEngineChannel::drain()
658 if (nextState
== Idle
|| ce
->drainState() != DrainState::Running
) {
659 return DrainState::Drained
;
661 DPRINTF(Drain
, "CopyEngineChannel not drained\n");
662 return DrainState::Draining
;
667 CopyEngine::serialize(CheckpointOut
&cp
) const
669 PciDevice::serialize(cp
);
671 for (int x
=0; x
< chan
.size(); x
++)
672 chan
[x
]->serializeSection(cp
, csprintf("channel%d", x
));
676 CopyEngine::unserialize(CheckpointIn
&cp
)
678 PciDevice::unserialize(cp
);
679 regs
.unserialize(cp
);
680 for (int x
= 0; x
< chan
.size(); x
++)
681 chan
[x
]->unserializeSection(cp
, csprintf("channel%d", x
));
685 CopyEngine::CopyEngineChannel::serialize(CheckpointOut
&cp
) const
687 SERIALIZE_SCALAR(channelId
);
688 SERIALIZE_SCALAR(busy
);
689 SERIALIZE_SCALAR(underReset
);
690 SERIALIZE_SCALAR(refreshNext
);
691 SERIALIZE_SCALAR(lastDescriptorAddr
);
692 SERIALIZE_SCALAR(completionDataReg
);
693 SERIALIZE_SCALAR(fetchAddress
);
694 int nextState
= this->nextState
;
695 SERIALIZE_SCALAR(nextState
);
696 arrayParamOut(cp
, "curDmaDesc", (uint8_t*)curDmaDesc
, sizeof(DmaDesc
));
697 SERIALIZE_ARRAY(copyBuffer
, ce
->params()->XferCap
);
702 CopyEngine::CopyEngineChannel::unserialize(CheckpointIn
&cp
)
704 UNSERIALIZE_SCALAR(channelId
);
705 UNSERIALIZE_SCALAR(busy
);
706 UNSERIALIZE_SCALAR(underReset
);
707 UNSERIALIZE_SCALAR(refreshNext
);
708 UNSERIALIZE_SCALAR(lastDescriptorAddr
);
709 UNSERIALIZE_SCALAR(completionDataReg
);
710 UNSERIALIZE_SCALAR(fetchAddress
);
712 UNSERIALIZE_SCALAR(nextState
);
713 this->nextState
= (ChannelState
)nextState
;
714 arrayParamIn(cp
, "curDmaDesc", (uint8_t*)curDmaDesc
, sizeof(DmaDesc
));
715 UNSERIALIZE_ARRAY(copyBuffer
, ce
->params()->XferCap
);
721 CopyEngine::CopyEngineChannel::restartStateMachine()
725 fetchNextAddr(lastDescriptorAddr
);
727 case DescriptorFetch
:
728 fetchDescriptor(fetchAddress
);
736 case CompletionWrite
:
737 writeCompletionStatus();
742 panic("Unknown state for CopyEngineChannel\n");
747 CopyEngine::CopyEngineChannel::drainResume()
749 DPRINTF(DMACopyEngine
, "Restarting state machine at state %d\n", nextState
);
750 restartStateMachine();
754 CopyEngineParams::create()
756 return new CopyEngine(this);