arch, cpu, dev, gpu, mem, sim, python: start using getPort.
src/dev/pci/copy_engine.cc
/*
 * Copyright (c) 2012 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2008 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's I/O AT DMA copy engine.
 */

#include "dev/pci/copy_engine.hh"

#include <algorithm>
#include <cstddef>
#include <cstring>

#include "base/cp_annotate.hh"
#include "base/trace.hh"
#include "debug/DMACopyEngine.hh"
#include "debug/Drain.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/CopyEngine.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace CopyEngineReg;

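// The device exposes ChanCnt independent DMA channels; each channel is
// modelled as its own small state machine with its own DMA port.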
CopyEngine::CopyEngine(const Params *p)
    : PciDevice(p)
{
    // All Reg regs are initialized to 0 by default
    regs.chanCount = p->ChanCnt;
    regs.xferCap = findMsbSet(p->XferCap);
    regs.attnStatus = 0;

    if (regs.chanCount > 64)
        fatal("CopyEngine interface doesn't support more than 64 DMA engines\n");

    for (int x = 0; x < regs.chanCount; x++) {
        CopyEngineChannel *ch = new CopyEngineChannel(this, x);
        chan.push_back(ch);
    }
}

CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine *_ce, int cid)
    : cePort(_ce, _ce->sys),
      ce(_ce), channelId(cid), busy(false), underReset(false),
      refreshNext(false), latBeforeBegin(ce->params()->latBeforeBegin),
      latAfterCompletion(ce->params()->latAfterCompletion),
      completionDataReg(0), nextState(Idle),
      fetchCompleteEvent([this]{ fetchDescComplete(); }, name()),
      addrCompleteEvent([this]{ fetchAddrComplete(); }, name()),
      readCompleteEvent([this]{ readCopyBytesComplete(); }, name()),
      writeCompleteEvent([this]{ writeCopyBytesComplete(); }, name()),
      statusCompleteEvent([this]{ writeStatusComplete(); }, name())
{
    cr.status.dma_transfer_status(3);
    cr.descChainAddr = 0;
    cr.completionAddr = 0;

    curDmaDesc = new DmaDesc;
    memset(curDmaDesc, 0, sizeof(DmaDesc));
    copyBuffer = new uint8_t[ce->params()->XferCap];
}

CopyEngine::~CopyEngine()
{
    for (int x = 0; x < chan.size(); x++) {
        delete chan[x];
    }
}

CopyEngine::CopyEngineChannel::~CopyEngineChannel()
{
    delete curDmaDesc;
    delete [] copyBuffer;
}

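// The copy engine exposes its DMA ports as a vector port named "dma":
// index i resolves to channel i's port. Any other port name is deferred
// to the PciDevice base class.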
Port &
CopyEngine::getPort(const std::string &if_name, PortID idx)
{
    if (if_name != "dma") {
        // pass it along to our super class
        return PciDevice::getPort(if_name, idx);
    } else {
        if (idx >= static_cast<int>(chan.size())) {
            panic("CopyEngine::getPort: unknown index %d\n", idx);
        }

        return chan[idx]->getPort();
    }
}

Port &
CopyEngine::CopyEngineChannel::getPort()
{
    return cePort;
}

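// Called after software writes CHAN_COMMAND. Decodes the command bits and
// kicks the channel state machine; the actual DMA is deferred while the
// simulator is draining and picked up again from drainResume().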
void
CopyEngine::CopyEngineChannel::recvCommand()
{
    if (cr.command.start_dma()) {
        assert(!busy);
        cr.status.dma_transfer_status(0);
        nextState = DescriptorFetch;
        fetchAddress = cr.descChainAddr;
        if (ce->drainState() == DrainState::Running)
            fetchDescriptor(cr.descChainAddr);
    } else if (cr.command.append_dma()) {
        if (!busy) {
            nextState = AddressFetch;
            if (ce->drainState() == DrainState::Running)
                fetchNextAddr(lastDescriptorAddr);
        } else
            refreshNext = true;
    } else if (cr.command.reset_dma()) {
        if (busy)
            underReset = true;
        else {
            cr.status.dma_transfer_status(3);
            nextState = Idle;
        }
    } else if (cr.command.resume_dma() || cr.command.abort_dma() ||
               cr.command.suspend_dma())
        panic("Resume, Abort, and Suspend are not supported\n");
    cr.command(0);
}

Tick
CopyEngine::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();
    if (size != sizeof(uint64_t) && size != sizeof(uint32_t) &&
        size != sizeof(uint16_t) && size != sizeof(uint8_t)) {
        panic("Unknown size for MMIO access: %d\n", pkt->getSize());
    }

    DPRINTF(DMACopyEngine, "Read device register %#X size: %d\n", daddr, size);

    ///
    /// Handle read of register here
    ///

    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
            assert(size == sizeof(regs.chanCount));
            pkt->setLE<uint8_t>(regs.chanCount);
            break;
          case GEN_XFERCAP:
            assert(size == sizeof(regs.xferCap));
            pkt->setLE<uint8_t>(regs.xferCap);
            break;
          case GEN_INTRCTRL:
            assert(size == sizeof(uint8_t));
            pkt->setLE<uint8_t>(regs.intrctrl());
            regs.intrctrl.master_int_enable(0);
            break;
          case GEN_ATTNSTATUS:
            assert(size == sizeof(regs.attnStatus));
            pkt->setLE<uint32_t>(regs.attnStatus);
            regs.attnStatus = 0;
            break;
          default:
            panic("Read request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }

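    // General registers occupy the first 0x80 bytes of BAR0; after that each
    // channel gets its own 0x80-byte register window, so the channel id is
    // the number of whole 0x80-byte windows past the general block.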
    // Find which channel we're accessing
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelRead(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}

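// Read of a single channel's registers. daddr has already been rebased to
// the start of this channel's 0x80-byte window by CopyEngine::read().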
void
CopyEngine::CopyEngineChannel::channelRead(Packet *pkt, Addr daddr, int size)
{
    switch (daddr) {
      case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
        pkt->setLE<uint16_t>(cr.ctrl());
        cr.ctrl.in_use(1);
        break;
      case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        pkt->setLE<uint64_t>(cr.status() | (busy ? 0 : 1));
        break;
      case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            pkt->setLE<uint64_t>(cr.descChainAddr);
        else
            pkt->setLE<uint32_t>(bits(cr.descChainAddr,0,31));
        break;
      case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        pkt->setLE<uint32_t>(bits(cr.descChainAddr,32,63));
        break;
      case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        // The command register is a single byte; use an access size that
        // matches the assertion above.
        pkt->setLE<uint8_t>(cr.command());
        break;
      case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            pkt->setLE<uint64_t>(cr.completionAddr);
        else
            pkt->setLE<uint32_t>(bits(cr.completionAddr,0,31));
        break;
      case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        pkt->setLE<uint32_t>(bits(cr.completionAddr,32,63));
        break;
      case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        pkt->setLE<uint32_t>(cr.error());
        break;
      default:
        panic("Read request to unknown channel register number: (%d)%#x\n",
              channelId, daddr);
    }
}

Tick
CopyEngine::write(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();

    ///
    /// Handle write of register here
    ///

    if (size == sizeof(uint64_t)) {
        uint64_t val M5_VAR_USED = pkt->getLE<uint64_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
                daddr, val);
    } else if (size == sizeof(uint32_t)) {
        uint32_t val M5_VAR_USED = pkt->getLE<uint32_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
                daddr, val);
    } else if (size == sizeof(uint16_t)) {
        uint16_t val M5_VAR_USED = pkt->getLE<uint16_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
                daddr, val);
    } else if (size == sizeof(uint8_t)) {
        uint8_t val M5_VAR_USED = pkt->getLE<uint8_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n",
                daddr, val);
    } else {
        panic("Unknown size for MMIO access: %d\n", size);
    }

    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
          case GEN_XFERCAP:
          case GEN_ATTNSTATUS:
            DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
                    daddr);
            break;
          case GEN_INTRCTRL:
            regs.intrctrl.master_int_enable(bits(pkt->getLE<uint8_t>(), 0, 1));
            break;
          default:
            panic("Write request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }

    // Find which channel we're accessing
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelWrite(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}

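// Write to a single channel's registers; a write to CHAN_COMMAND is what
// actually starts, appends to, or resets a DMA chain via recvCommand().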
void
CopyEngine::CopyEngineChannel::channelWrite(Packet *pkt, Addr daddr, int size)
{
    switch (daddr) {
      case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
        int old_int_disable;
        old_int_disable = cr.ctrl.interrupt_disable();
        cr.ctrl(pkt->getLE<uint16_t>());
        if (cr.ctrl.interrupt_disable())
            cr.ctrl.interrupt_disable(0);
        else
            cr.ctrl.interrupt_disable(old_int_disable);
        break;
      case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
                daddr);
        break;
      case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            cr.descChainAddr = pkt->getLE<uint64_t>();
        else
            cr.descChainAddr = (uint64_t)pkt->getLE<uint32_t>() |
                (cr.descChainAddr & ~mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
      case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.descChainAddr = ((uint64_t)pkt->getLE<uint32_t>() << 32) |
            (cr.descChainAddr & mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
      case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        cr.command(pkt->getLE<uint8_t>());
        recvCommand();
        break;
      case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            cr.completionAddr = pkt->getLE<uint64_t>();
        else
            cr.completionAddr = pkt->getLE<uint32_t>() |
                (cr.completionAddr & ~mask(32));
        break;
      case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.completionAddr = ((uint64_t)pkt->getLE<uint32_t>() << 32) |
            (cr.completionAddr & mask(32));
        break;
      case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        cr.error(~pkt->getLE<uint32_t>() & cr.error());
        break;
      default:
        panic("Write request to unknown channel register number: (%d)%#x\n",
              channelId, daddr);
    }
}

void
CopyEngine::regStats()
{
    PciDevice::regStats();

    using namespace Stats;
    bytesCopied
        .init(regs.chanCount)
        .name(name() + ".bytes_copied")
        .desc("Number of bytes copied by each engine")
        .flags(total)
        ;
    copiesProcessed
        .init(regs.chanCount)
        .name(name() + ".copies_processed")
        .desc("Number of copies processed by each engine")
        .flags(total)
        ;
}

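// Start of a transfer: fetch the next descriptor in the chain from memory
// into curDmaDesc over the channel's DMA port; latBeforeBegin models the
// channel's start-up latency.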
void
CopyEngine::CopyEngineChannel::fetchDescriptor(Addr address)
{
    anDq();
    anBegin("FetchDescriptor");
    DPRINTF(DMACopyEngine, "Reading descriptor at memory location %#x(%#x)\n",
            address, ce->pciToDma(address));
    assert(address);
    busy = true;

    DPRINTF(DMACopyEngine, "dmaAction: %#x, %d bytes, to addr %#x\n",
            ce->pciToDma(address), sizeof(DmaDesc), curDmaDesc);

    cePort.dmaAction(MemCmd::ReadReq, ce->pciToDma(address),
                     sizeof(DmaDesc), &fetchCompleteEvent,
                     (uint8_t*)curDmaDesc, latBeforeBegin);
    lastDescriptorAddr = address;
}

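// Descriptor fetch finished. A NULL descriptor terminates the chain and
// sends the channel back to Idle; an ordinary descriptor moves it on to
// reading the source data.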
void
CopyEngine::CopyEngineChannel::fetchDescComplete()
{
    DPRINTF(DMACopyEngine, "Read of descriptor complete\n");

    if ((curDmaDesc->command & DESC_CTRL_NULL)) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, skipping\n");
        assert(!(curDmaDesc->command & DESC_CTRL_CP_STS));
        if (curDmaDesc->command & DESC_CTRL_CP_STS) {
            panic("Shouldn't be able to get here\n");
            nextState = CompletionWrite;
            if (inDrain()) return;
            writeCompletionStatus();
        } else {
            anBegin("Idle");
            anWait();
            busy = false;
            nextState = Idle;
            inDrain();
        }
        return;
    }

    if (curDmaDesc->command & ~DESC_CTRL_CP_STS)
        panic("Descriptor has flag other than completion status set\n");

    nextState = DMARead;
    if (inDrain()) return;
    readCopyBytes();
}

void
CopyEngine::CopyEngineChannel::readCopyBytes()
{
    anBegin("ReadCopyBytes");
    DPRINTF(DMACopyEngine, "Reading %d bytes from memory location %#x(%#x) to buffer\n",
            curDmaDesc->len, curDmaDesc->src,
            ce->pciToDma(curDmaDesc->src));
    cePort.dmaAction(MemCmd::ReadReq, ce->pciToDma(curDmaDesc->src),
                     curDmaDesc->len, &readCompleteEvent, copyBuffer, 0);
}

void
CopyEngine::CopyEngineChannel::readCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Read of bytes to copy complete\n");

    nextState = DMAWrite;
    if (inDrain()) return;
    writeCopyBytes();
}

void
CopyEngine::CopyEngineChannel::writeCopyBytes()
{
    anBegin("WriteCopyBytes");
    DPRINTF(DMACopyEngine, "Writing %d bytes from buffer to memory location %#x(%#x)\n",
            curDmaDesc->len, curDmaDesc->dest,
            ce->pciToDma(curDmaDesc->dest));

    cePort.dmaAction(MemCmd::WriteReq, ce->pciToDma(curDmaDesc->dest),
                     curDmaDesc->len, &writeCompleteEvent, copyBuffer, 0);

    ce->bytesCopied[channelId] += curDmaDesc->len;
    ce->copiesProcessed[channelId]++;
}

void
CopyEngine::CopyEngineChannel::writeCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Write of bytes to copy complete user1: %#x\n",
            curDmaDesc->user1);

    cr.status.compl_desc_addr(lastDescriptorAddr >> 6);
    completionDataReg = cr.status() | 1;

    anQ("DMAUsedDescQ", channelId, 1);
    anQ("AppRecvQ", curDmaDesc->user1, curDmaDesc->len);
    if (curDmaDesc->command & DESC_CTRL_CP_STS) {
        nextState = CompletionWrite;
        if (inDrain()) return;
        writeCompletionStatus();
        return;
    }

    continueProcessing();
}

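// Decide what to do after the current descriptor is finished: honour a
// pending reset, follow the descriptor's next pointer, pick up an appended
// chain, or go back to Idle.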
void
CopyEngine::CopyEngineChannel::continueProcessing()
{
    busy = false;

    if (underReset) {
        anBegin("Reset");
        anWait();
        underReset = false;
        refreshNext = false;
        busy = false;
        nextState = Idle;
        return;
    }

    if (curDmaDesc->next) {
        nextState = DescriptorFetch;
        fetchAddress = curDmaDesc->next;
        if (inDrain()) return;
        fetchDescriptor(curDmaDesc->next);
    } else if (refreshNext) {
        nextState = AddressFetch;
        refreshNext = false;
        if (inDrain()) return;
        fetchNextAddr(lastDescriptorAddr);
    } else {
        inDrain();
        nextState = Idle;
        anWait();
        anBegin("Idle");
    }
}

void
CopyEngine::CopyEngineChannel::writeCompletionStatus()
{
    anBegin("WriteCompletionStatus");
    DPRINTF(DMACopyEngine, "Writing completion status %#x to address %#x(%#x)\n",
            completionDataReg, cr.completionAddr,
            ce->pciToDma(cr.completionAddr));

    cePort.dmaAction(MemCmd::WriteReq,
                     ce->pciToDma(cr.completionAddr),
                     sizeof(completionDataReg), &statusCompleteEvent,
                     (uint8_t*)&completionDataReg, latAfterCompletion);
}

void
CopyEngine::CopyEngineChannel::writeStatusComplete()
{
    DPRINTF(DMACopyEngine, "Writing completion status complete\n");
    continueProcessing();
}

void
CopyEngine::CopyEngineChannel::fetchNextAddr(Addr address)
{
    anBegin("FetchNextAddr");
    DPRINTF(DMACopyEngine, "Fetching next address...\n");
    busy = true;
    cePort.dmaAction(MemCmd::ReadReq,
                     ce->pciToDma(address + offsetof(DmaDesc, next)),
                     sizeof(Addr), &addrCompleteEvent,
                     (uint8_t*)curDmaDesc + offsetof(DmaDesc, next), 0);
}

void
CopyEngine::CopyEngineChannel::fetchAddrComplete()
{
    DPRINTF(DMACopyEngine, "Fetching next address complete: %#x\n",
            curDmaDesc->next);
    if (!curDmaDesc->next) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, nothing more to do\n");
        busy = false;
        nextState = Idle;
        anWait();
        anBegin("Idle");
        inDrain();
        return;
    }
    nextState = DescriptorFetch;
    fetchAddress = curDmaDesc->next;
    if (inDrain()) return;
    fetchDescriptor(curDmaDesc->next);
}

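// Helper called between state-machine steps: if a drain is in progress,
// signal that this channel is now quiescent. Returns true when the
// simulator is not running, telling the caller to stall until resume.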
bool
CopyEngine::CopyEngineChannel::inDrain()
{
    if (drainState() == DrainState::Draining) {
        DPRINTF(Drain, "CopyEngine done draining, processing drain event\n");
        signalDrainDone();
    }

    return ce->drainState() != DrainState::Running;
}

DrainState
CopyEngine::CopyEngineChannel::drain()
{
    if (nextState == Idle || ce->drainState() != DrainState::Running) {
        return DrainState::Drained;
    } else {
        DPRINTF(Drain, "CopyEngineChannel not drained\n");
        return DrainState::Draining;
    }
}

void
CopyEngine::serialize(CheckpointOut &cp) const
{
    PciDevice::serialize(cp);
    regs.serialize(cp);
    for (int x = 0; x < chan.size(); x++)
        chan[x]->serializeSection(cp, csprintf("channel%d", x));
}

void
CopyEngine::unserialize(CheckpointIn &cp)
{
    PciDevice::unserialize(cp);
    regs.unserialize(cp);
    for (int x = 0; x < chan.size(); x++)
        chan[x]->unserializeSection(cp, csprintf("channel%d", x));
}

void
CopyEngine::CopyEngineChannel::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(channelId);
    SERIALIZE_SCALAR(busy);
    SERIALIZE_SCALAR(underReset);
    SERIALIZE_SCALAR(refreshNext);
    SERIALIZE_SCALAR(lastDescriptorAddr);
    SERIALIZE_SCALAR(completionDataReg);
    SERIALIZE_SCALAR(fetchAddress);
    int nextState = this->nextState;
    SERIALIZE_SCALAR(nextState);
    arrayParamOut(cp, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    SERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.serialize(cp);
}

void
CopyEngine::CopyEngineChannel::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(channelId);
    UNSERIALIZE_SCALAR(busy);
    UNSERIALIZE_SCALAR(underReset);
    UNSERIALIZE_SCALAR(refreshNext);
    UNSERIALIZE_SCALAR(lastDescriptorAddr);
    UNSERIALIZE_SCALAR(completionDataReg);
    UNSERIALIZE_SCALAR(fetchAddress);
    int nextState;
    UNSERIALIZE_SCALAR(nextState);
    this->nextState = (ChannelState)nextState;
    arrayParamIn(cp, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    UNSERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.unserialize(cp);
}

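// Pick the state machine back up from the last recorded state, e.g. after
// the simulator resumes from a drain or a checkpoint restore.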
void
CopyEngine::CopyEngineChannel::restartStateMachine()
{
    switch (nextState) {
      case AddressFetch:
        fetchNextAddr(lastDescriptorAddr);
        break;
      case DescriptorFetch:
        fetchDescriptor(fetchAddress);
        break;
      case DMARead:
        readCopyBytes();
        break;
      case DMAWrite:
        writeCopyBytes();
        break;
      case CompletionWrite:
        writeCompletionStatus();
        break;
      case Idle:
        break;
      default:
        panic("Unknown state for CopyEngineChannel\n");
    }
}

void
CopyEngine::CopyEngineChannel::drainResume()
{
    DPRINTF(DMACopyEngine, "Restarting state machine at state %d\n", nextState);
    restartStateMachine();
}

CopyEngine *
CopyEngineParams::create()
{
    return new CopyEngine(this);
}