7b17a86a342cb55e0b35c099c3e6b5f681af5e88
[gem5.git] / src / dev / copy_engine.cc
1 /*
2 * Copyright (c) 2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2008 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Ali Saidi
41 */
42
43 /* @file
44 * Device model for Intel's I/O AT DMA copy engine.
45 */
46
47 #include <algorithm>
48
49 #include "base/cp_annotate.hh"
50 #include "base/trace.hh"
51 #include "debug/DMACopyEngine.hh"
52 #include "dev/copy_engine.hh"
53 #include "mem/packet.hh"
54 #include "mem/packet_access.hh"
55 #include "params/CopyEngine.hh"
56 #include "sim/stats.hh"
57 #include "sim/system.hh"
58
59 using namespace CopyEngineReg;
60
61 CopyEngine::CopyEngine(const Params *p)
62 : PciDev(p)
63 {
64 // All Reg regs are initialized to 0 by default
65 regs.chanCount = p->ChanCnt;
66 regs.xferCap = findMsbSet(p->XferCap);
67 regs.attnStatus = 0;
68
69 if (regs.chanCount > 64)
70 fatal("CopyEngine interface doesn't support more than 64 DMA engines\n");
71
72 for (int x = 0; x < regs.chanCount; x++) {
73 CopyEngineChannel *ch = new CopyEngineChannel(this, x);
74 chan.push_back(ch);
75 }
76 }
77
78
// Construct a single DMA channel: wire up its DMA port, initialize the
// per-channel state machine to Idle, and allocate the descriptor and
// staging-copy buffers used for transfers.
CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine *_ce, int cid)
    : cePort(_ce, _ce->sys, _ce->params()->min_backoff_delay,
             _ce->params()->max_backoff_delay),
      ce(_ce), channelId(cid), busy(false), underReset(false),
      refreshNext(false), latBeforeBegin(ce->params()->latBeforeBegin),
      latAfterCompletion(ce->params()->latAfterCompletion),
      completionDataReg(0), nextState(Idle), drainEvent(NULL),
      fetchCompleteEvent(this), addrCompleteEvent(this),
      readCompleteEvent(this), writeCompleteEvent(this),
      statusCompleteEvent(this)

{
    // dma_transfer_status == 3 indicates the channel is halted/idle.
    cr.status.dma_transfer_status(3);
    cr.descChainAddr = 0;
    cr.completionAddr = 0;

    // Descriptor currently being processed; zeroed so a stray read of
    // e.g. the `next` field before the first fetch sees NULL.
    curDmaDesc = new DmaDesc;
    memset(curDmaDesc, 0, sizeof(DmaDesc));
    // Staging buffer sized to the maximum transfer this engine allows.
    copyBuffer = new uint8_t[ce->params()->XferCap];
}
99
100 CopyEngine::~CopyEngine()
101 {
102 for (int x = 0; x < chan.size(); x++) {
103 delete chan[x];
104 }
105 }
106
CopyEngine::CopyEngineChannel::~CopyEngineChannel()
{
    // Release the descriptor and the staging copy buffer allocated in
    // the constructor.
    delete curDmaDesc;
    delete [] copyBuffer;
}
112
113 Port *
114 CopyEngine::getPort(const std::string &if_name, int idx)
115 {
116 if (if_name == "dma") {
117 if (idx < chan.size())
118 return chan[idx]->getPort();
119 }
120 return PciDev::getPort(if_name, idx);
121 }
122
123
// Accessor for this channel's DMA port (used by CopyEngine::getPort).
Port *
CopyEngine::CopyEngineChannel::getPort()
{
    return &cePort;
}
129
// Decode and act on a write to the channel command register. Called
// from channelWrite() after cr.command has been updated.
void
CopyEngine::CopyEngineChannel::recvCommand()
{
    if (cr.command.start_dma()) {
        // Begin processing descriptors from the head of the chain.
        assert(!busy);
        cr.status.dma_transfer_status(0);
        nextState = DescriptorFetch;
        fetchAddress = cr.descChainAddr;
        // Only issue DMA while the simulator is running; otherwise
        // restartStateMachine() resumes from nextState later.
        if (ce->getState() == SimObject::Running)
            fetchDescriptor(cr.descChainAddr);
    } else if (cr.command.append_dma()) {
        // The driver appended to the chain: if idle, re-read the next
        // pointer of the last processed descriptor now; if busy, defer
        // the refresh until the current copy finishes.
        if (!busy) {
            nextState = AddressFetch;
            if (ce->getState() == SimObject::Running)
                fetchNextAddr(lastDescriptorAddr);
        } else
            refreshNext = true;
    } else if (cr.command.reset_dma()) {
        // Reset is immediate when idle, otherwise deferred until the
        // in-flight operation completes (see continueProcessing()).
        if (busy)
            underReset = true;
        else {
            cr.status.dma_transfer_status(3);
            nextState = Idle;
        }
    } else if (cr.command.resume_dma() || cr.command.abort_dma() ||
               cr.command.suspend_dma())
        panic("Resume, Abort, and Suspend are not supported\n");
    // The command register is self-clearing.
    cr.command(0);
}
159
// MMIO read handler. Decodes the BAR offset into either a device-global
// register (first 0x80 bytes) or a per-channel register window and
// returns the register contents in the packet.
Tick
CopyEngine::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // Accesses must be exactly 1, 2, 4, or 8 bytes.
    int size = pkt->getSize();
    if (size != sizeof(uint64_t) && size != sizeof(uint32_t) &&
        size != sizeof(uint16_t) && size != sizeof(uint8_t)) {
        panic("Unknown size for MMIO access: %d\n", pkt->getSize());
    }

    DPRINTF(DMACopyEngine, "Read device register %#X size: %d\n", daddr, size);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///

    // Device-global registers occupy the first 0x80 bytes.
    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
            assert(size == sizeof(regs.chanCount));
            pkt->set<uint8_t>(regs.chanCount);
            break;
          case GEN_XFERCAP:
            assert(size == sizeof(regs.xferCap));
            pkt->set<uint8_t>(regs.xferCap);
            break;
          case GEN_INTRCTRL:
            assert(size == sizeof(uint8_t));
            pkt->set<uint8_t>(regs.intrctrl());
            // Reading this register clears the master interrupt enable.
            regs.intrctrl.master_int_enable(0);
            break;
          case GEN_ATTNSTATUS:
            assert(size == sizeof(regs.attnStatus));
            pkt->set<uint32_t>(regs.attnStatus);
            // Attention status is clear-on-read.
            regs.attnStatus = 0;
            break;
          default:
            panic("Read request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }


    // Find which channel we're accessing: each channel occupies its own
    // 0x80-byte window after the global registers.
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelRead(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}
234
235 void
236 CopyEngine::CopyEngineChannel::channelRead(Packet *pkt, Addr daddr, int size)
237 {
238 switch (daddr) {
239 case CHAN_CONTROL:
240 assert(size == sizeof(uint16_t));
241 pkt->set<uint16_t>(cr.ctrl());
242 cr.ctrl.in_use(1);
243 break;
244 case CHAN_STATUS:
245 assert(size == sizeof(uint64_t));
246 pkt->set<uint64_t>(cr.status() | ~busy);
247 break;
248 case CHAN_CHAINADDR:
249 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
250 if (size == sizeof(uint64_t))
251 pkt->set<uint64_t>(cr.descChainAddr);
252 else
253 pkt->set<uint32_t>(bits(cr.descChainAddr,0,31));
254 break;
255 case CHAN_CHAINADDR_HIGH:
256 assert(size == sizeof(uint32_t));
257 pkt->set<uint32_t>(bits(cr.descChainAddr,32,63));
258 break;
259 case CHAN_COMMAND:
260 assert(size == sizeof(uint8_t));
261 pkt->set<uint32_t>(cr.command());
262 break;
263 case CHAN_CMPLNADDR:
264 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
265 if (size == sizeof(uint64_t))
266 pkt->set<uint64_t>(cr.completionAddr);
267 else
268 pkt->set<uint32_t>(bits(cr.completionAddr,0,31));
269 break;
270 case CHAN_CMPLNADDR_HIGH:
271 assert(size == sizeof(uint32_t));
272 pkt->set<uint32_t>(bits(cr.completionAddr,32,63));
273 break;
274 case CHAN_ERROR:
275 assert(size == sizeof(uint32_t));
276 pkt->set<uint32_t>(cr.error());
277 break;
278 default:
279 panic("Read request to unknown channel register number: (%d)%#x\n",
280 channelId, daddr);
281 }
282 }
283
284
285 Tick
286 CopyEngine::write(PacketPtr pkt)
287 {
288 int bar;
289 Addr daddr;
290
291
292 if (!getBAR(pkt->getAddr(), bar, daddr))
293 panic("Invalid PCI memory access to unmapped memory.\n");
294
295 // Only Memory register BAR is allowed
296 assert(bar == 0);
297
298 int size = pkt->getSize();
299
300 ///
301 /// Handle write of register here
302 ///
303
304 if (size == sizeof(uint64_t)) {
305 uint64_t val M5_VAR_USED = pkt->get<uint64_t>();
306 DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
307 } else if (size == sizeof(uint32_t)) {
308 uint32_t val M5_VAR_USED = pkt->get<uint32_t>();
309 DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
310 } else if (size == sizeof(uint16_t)) {
311 uint16_t val M5_VAR_USED = pkt->get<uint16_t>();
312 DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
313 } else if (size == sizeof(uint8_t)) {
314 uint8_t val M5_VAR_USED = pkt->get<uint8_t>();
315 DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
316 } else {
317 panic("Unknown size for MMIO access: %d\n", size);
318 }
319
320 if (daddr < 0x80) {
321 switch (daddr) {
322 case GEN_CHANCOUNT:
323 case GEN_XFERCAP:
324 case GEN_ATTNSTATUS:
325 DPRINTF(DMACopyEngine, "Warning, ignorning write to register %x\n",
326 daddr);
327 break;
328 case GEN_INTRCTRL:
329 regs.intrctrl.master_int_enable(bits(pkt->get<uint8_t>(),0,1));
330 break;
331 default:
332 panic("Read request to unknown register number: %#x\n", daddr);
333 }
334 pkt->makeAtomicResponse();
335 return pioDelay;
336 }
337
338 // Find which channel we're accessing
339 int chanid = 0;
340 daddr -= 0x80;
341 while (daddr >= 0x80) {
342 chanid++;
343 daddr -= 0x80;
344 }
345
346 if (chanid >= regs.chanCount)
347 panic("Access to channel %d (device only configured for %d channels)",
348 chanid, regs.chanCount);
349
350 ///
351 /// Channel registers are handled here
352 ///
353 chan[chanid]->channelWrite(pkt, daddr, size);
354
355 pkt->makeAtomicResponse();
356 return pioDelay;
357 }
358
359 void
360 CopyEngine::CopyEngineChannel::channelWrite(Packet *pkt, Addr daddr, int size)
361 {
362 switch (daddr) {
363 case CHAN_CONTROL:
364 assert(size == sizeof(uint16_t));
365 int old_int_disable;
366 old_int_disable = cr.ctrl.interrupt_disable();
367 cr.ctrl(pkt->get<uint16_t>());
368 if (cr.ctrl.interrupt_disable())
369 cr.ctrl.interrupt_disable(0);
370 else
371 cr.ctrl.interrupt_disable(old_int_disable);
372 break;
373 case CHAN_STATUS:
374 assert(size == sizeof(uint64_t));
375 DPRINTF(DMACopyEngine, "Warning, ignorning write to register %x\n",
376 daddr);
377 break;
378 case CHAN_CHAINADDR:
379 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
380 if (size == sizeof(uint64_t))
381 cr.descChainAddr = pkt->get<uint64_t>();
382 else
383 cr.descChainAddr = (uint64_t)pkt->get<uint32_t>() |
384 (cr.descChainAddr & ~mask(32));
385 DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
386 break;
387 case CHAN_CHAINADDR_HIGH:
388 assert(size == sizeof(uint32_t));
389 cr.descChainAddr = ((uint64_t)pkt->get<uint32_t>() <<32) |
390 (cr.descChainAddr & mask(32));
391 DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
392 break;
393 case CHAN_COMMAND:
394 assert(size == sizeof(uint8_t));
395 cr.command(pkt->get<uint8_t>());
396 recvCommand();
397 break;
398 case CHAN_CMPLNADDR:
399 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
400 if (size == sizeof(uint64_t))
401 cr.completionAddr = pkt->get<uint64_t>();
402 else
403 cr.completionAddr = pkt->get<uint32_t>() |
404 (cr.completionAddr & ~mask(32));
405 break;
406 case CHAN_CMPLNADDR_HIGH:
407 assert(size == sizeof(uint32_t));
408 cr.completionAddr = ((uint64_t)pkt->get<uint32_t>() <<32) |
409 (cr.completionAddr & mask(32));
410 break;
411 case CHAN_ERROR:
412 assert(size == sizeof(uint32_t));
413 cr.error(~pkt->get<uint32_t>() & cr.error());
414 break;
415 default:
416 panic("Read request to unknown channel register number: (%d)%#x\n",
417 channelId, daddr);
418 }
419 }
420
421 void
422 CopyEngine::regStats()
423 {
424 using namespace Stats;
425 bytesCopied
426 .init(regs.chanCount)
427 .name(name() + ".bytes_copied")
428 .desc("Number of bytes copied by each engine")
429 .flags(total)
430 ;
431 copiesProcessed
432 .init(regs.chanCount)
433 .name(name() + ".copies_processed")
434 .desc("Number of copies processed by each engine")
435 .flags(total)
436 ;
437 }
438
// Start a DMA read of the descriptor at `address` into curDmaDesc.
// fetchDescComplete() fires when the read finishes.
void
CopyEngine::CopyEngineChannel::fetchDescriptor(Addr address)
{
    anDq();
    anBegin("FetchDescriptor");
    DPRINTF(DMACopyEngine, "Reading descriptor from at memory location %#x(%#x)\n",
            address, ce->platform->pciToDma(address));
    assert(address);
    busy = true;

    DPRINTF(DMACopyEngine, "dmaAction: %#x, %d bytes, to addr %#x\n",
            ce->platform->pciToDma(address), sizeof(DmaDesc), curDmaDesc);

    cePort.dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(address),
                     sizeof(DmaDesc), &fetchCompleteEvent,
                     (uint8_t*)curDmaDesc, latBeforeBegin);
    // Remembered so an append command can re-read this descriptor's
    // next pointer later (see fetchNextAddr()).
    lastDescriptorAddr = address;
}
457
// Completion callback for the descriptor fetch: validate the descriptor
// and advance the state machine to the data-read phase.
void
CopyEngine::CopyEngineChannel::fetchDescComplete()
{
    DPRINTF(DMACopyEngine, "Read of descriptor complete\n");

    if ((curDmaDesc->command & DESC_CTRL_NULL)) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, skipping\n");
        // NOTE(review): this assert makes the if-branch below
        // unreachable; the panic branch is dead code documenting the
        // unsupported NULL+completion-status combination.
        assert(!(curDmaDesc->command & DESC_CTRL_CP_STS));
        if (curDmaDesc->command & DESC_CTRL_CP_STS) {
            panic("Shouldn't be able to get here\n");
            nextState = CompletionWrite;
            if (inDrain()) return;
            writeCompletionStatus();
        } else {
            // A NULL descriptor terminates the chain; go idle.
            anBegin("Idle");
            anWait();
            busy = false;
            nextState = Idle;
            inDrain();
        }
        return;
    }

    // Only the completion-status flag is implemented.
    if (curDmaDesc->command & ~DESC_CTRL_CP_STS)
        panic("Descriptor has flag other that completion status set\n");

    nextState = DMARead;
    if (inDrain()) return;
    readCopyBytes();
}
488
489 void
490 CopyEngine::CopyEngineChannel::readCopyBytes()
491 {
492 anBegin("ReadCopyBytes");
493 DPRINTF(DMACopyEngine, "Reading %d bytes from buffer to memory location %#x(%#x)\n",
494 curDmaDesc->len, curDmaDesc->dest,
495 ce->platform->pciToDma(curDmaDesc->src));
496 cePort.dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(curDmaDesc->src),
497 curDmaDesc->len, &readCompleteEvent, copyBuffer, 0);
498 }
499
500 void
501 CopyEngine::CopyEngineChannel::readCopyBytesComplete()
502 {
503 DPRINTF(DMACopyEngine, "Read of bytes to copy complete\n");
504
505 nextState = DMAWrite;
506 if (inDrain()) return;
507 writeCopyBytes();
508 }
509
// Start the DMA write of the staged data to the destination address.
// writeCopyBytesComplete() fires when the write finishes.
void
CopyEngine::CopyEngineChannel::writeCopyBytes()
{
    anBegin("WriteCopyBytes");
    DPRINTF(DMACopyEngine, "Writing %d bytes from buffer to memory location %#x(%#x)\n",
            curDmaDesc->len, curDmaDesc->dest,
            ce->platform->pciToDma(curDmaDesc->dest));

    cePort.dmaAction(MemCmd::WriteReq, ce->platform->pciToDma(curDmaDesc->dest),
                     curDmaDesc->len, &writeCompleteEvent, copyBuffer, 0);

    // Account the copy against this channel's statistics.
    ce->bytesCopied[channelId] += curDmaDesc->len;
    ce->copiesProcessed[channelId]++;
}
524
// Completion callback for the destination write: record completion
// status and either write it back to memory or continue with the chain.
void
CopyEngine::CopyEngineChannel::writeCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Write of bytes to copy complete user1: %#x\n",
            curDmaDesc->user1);

    // Record the (64-byte granular) address of the completed descriptor
    // in the status register; bit 0 flags the transfer as done.
    cr.status.compl_desc_addr(lastDescriptorAddr >> 6);
    completionDataReg = cr.status() | 1;

    anQ("DMAUsedDescQ", channelId, 1);
    anQ("AppRecvQ", curDmaDesc->user1, curDmaDesc->len);
    // Only write completion status back to memory if the descriptor
    // requested it; continueProcessing() is reached via
    // writeStatusComplete() in that case.
    if (curDmaDesc->command & DESC_CTRL_CP_STS) {
        nextState = CompletionWrite;
        if (inDrain()) return;
        writeCompletionStatus();
        return;
    }

    continueProcessing();
}
545
546 void
547 CopyEngine::CopyEngineChannel::continueProcessing()
548 {
549 busy = false;
550
551 if (underReset) {
552 anBegin("Reset");
553 anWait();
554 underReset = false;
555 refreshNext = false;
556 busy = false;
557 nextState = Idle;
558 return;
559 }
560
561 if (curDmaDesc->next) {
562 nextState = DescriptorFetch;
563 fetchAddress = curDmaDesc->next;
564 if (inDrain()) return;
565 fetchDescriptor(curDmaDesc->next);
566 } else if (refreshNext) {
567 nextState = AddressFetch;
568 refreshNext = false;
569 if (inDrain()) return;
570 fetchNextAddr(lastDescriptorAddr);
571 } else {
572 inDrain();
573 nextState = Idle;
574 anWait();
575 anBegin("Idle");
576 }
577 }
578
// DMA the completion data to the driver-supplied completion address;
// writeStatusComplete() fires when the write finishes.
void
CopyEngine::CopyEngineChannel::writeCompletionStatus()
{
    anBegin("WriteCompletionStatus");
    DPRINTF(DMACopyEngine, "Writing completion status %#x to address %#x(%#x)\n",
            completionDataReg, cr.completionAddr,
            ce->platform->pciToDma(cr.completionAddr));

    cePort.dmaAction(MemCmd::WriteReq,
                     ce->platform->pciToDma(cr.completionAddr),
                     sizeof(completionDataReg), &statusCompleteEvent,
                     (uint8_t*)&completionDataReg, latAfterCompletion);
}
592
// Completion callback for the status write-back; resume chain
// processing.
void
CopyEngine::CopyEngineChannel::writeStatusComplete()
{
    DPRINTF(DMACopyEngine, "Writing completion status complete\n");
    continueProcessing();
}
599
// Re-read just the `next` field of the descriptor at `address` directly
// into curDmaDesc->next (used after an append command). NOTE(review):
// this assumes sizeof(Addr) matches the width of DmaDesc::next --
// confirm against the descriptor definition in the header.
void
CopyEngine::CopyEngineChannel::fetchNextAddr(Addr address)
{
    anBegin("FetchNextAddr");
    DPRINTF(DMACopyEngine, "Fetching next address...\n");
    busy = true;
    cePort.dmaAction(MemCmd::ReadReq,
                     ce->platform->pciToDma(address + offsetof(DmaDesc, next)),
                     sizeof(Addr), &addrCompleteEvent,
                     (uint8_t*)curDmaDesc + offsetof(DmaDesc, next), 0);
}
611
// Completion callback for the next-pointer re-read: either go idle (no
// new descriptor was appended) or fetch the appended descriptor.
void
CopyEngine::CopyEngineChannel::fetchAddrComplete()
{
    DPRINTF(DMACopyEngine, "Fetching next address complete: %#x\n",
            curDmaDesc->next);
    if (!curDmaDesc->next) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, nothing more to do\n");
        busy = false;
        nextState = Idle;
        anWait();
        anBegin("Idle");
        inDrain();
        return;
    }
    nextState = DescriptorFetch;
    fetchAddress = curDmaDesc->next;
    if (inDrain()) return;
    fetchDescriptor(curDmaDesc->next);
}
631
632 bool
633 CopyEngine::CopyEngineChannel::inDrain()
634 {
635 if (ce->getState() == SimObject::Draining) {
636 DPRINTF(DMACopyEngine, "processing drain\n");
637 assert(drainEvent);
638 drainEvent->process();
639 drainEvent = NULL;
640 }
641
642 return ce->getState() != SimObject::Running;
643 }
644
// Count this channel's outstanding work for a drain request. Returns 0
// when nothing is in flight; otherwise registers `de` so inDrain() can
// signal it when the current operation completes.
unsigned int
CopyEngine::CopyEngineChannel::drain(Event *de)
{
    if (nextState == Idle || ce->getState() != SimObject::Running)
        return 0;
    // One outstanding item for the channel itself, plus whatever the
    // DMA port still has queued.
    unsigned int count = 1;
    count += cePort.drain(de);

    DPRINTF(DMACopyEngine, "unable to drain, returning %d\n", count);
    drainEvent = de;
    return count;
}
657
658 unsigned int
659 CopyEngine::drain(Event *de)
660 {
661 unsigned int count;
662 count = pioPort.drain(de) + dmaPort.drain(de) + configPort.drain(de);
663 for (int x = 0;x < chan.size(); x++)
664 count += chan[x]->drain(de);
665
666 if (count)
667 changeState(Draining);
668 else
669 changeState(Drained);
670
671 DPRINTF(DMACopyEngine, "call to CopyEngine::drain() returning %d\n", count);
672 return count;
673 }
674
675 void
676 CopyEngine::serialize(std::ostream &os)
677 {
678 PciDev::serialize(os);
679 regs.serialize(os);
680 for (int x =0; x < chan.size(); x++) {
681 nameOut(os, csprintf("%s.channel%d", name(), x));
682 chan[x]->serialize(os);
683 }
684 }
685
686 void
687 CopyEngine::unserialize(Checkpoint *cp, const std::string &section)
688 {
689 PciDev::unserialize(cp, section);
690 regs.unserialize(cp, section);
691 for (int x = 0; x < chan.size(); x++)
692 chan[x]->unserialize(cp, csprintf("%s.channel%d", section, x));
693 }
694
// Checkpoint this channel's state machine, in-flight descriptor, and
// staging buffer so a copy interrupted by a drain can be resumed.
void
CopyEngine::CopyEngineChannel::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(channelId);
    SERIALIZE_SCALAR(busy);
    SERIALIZE_SCALAR(underReset);
    SERIALIZE_SCALAR(refreshNext);
    SERIALIZE_SCALAR(lastDescriptorAddr);
    SERIALIZE_SCALAR(completionDataReg);
    SERIALIZE_SCALAR(fetchAddress);
    // The state enum is serialized as a plain int.
    int nextState = this->nextState;
    SERIALIZE_SCALAR(nextState);
    // Descriptor and copy buffer are dumped as raw byte arrays.
    arrayParamOut(os, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    SERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.serialize(os);

}
// Restore this channel's state from a checkpoint (inverse of
// serialize()); restartStateMachine() re-issues the interrupted DMA on
// resume().
void
CopyEngine::CopyEngineChannel::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(channelId);
    UNSERIALIZE_SCALAR(busy);
    UNSERIALIZE_SCALAR(underReset);
    UNSERIALIZE_SCALAR(refreshNext);
    UNSERIALIZE_SCALAR(lastDescriptorAddr);
    UNSERIALIZE_SCALAR(completionDataReg);
    UNSERIALIZE_SCALAR(fetchAddress);
    // The state enum was checkpointed as a plain int.
    int nextState;
    UNSERIALIZE_SCALAR(nextState);
    this->nextState = (ChannelState)nextState;
    arrayParamIn(cp, section, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    UNSERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.unserialize(cp, section);

}
730
// Re-issue the DMA operation that was interrupted by a drain; nextState
// (and fetchAddress where relevant) were restored by unserialize() or
// left in place by inDrain().
void
CopyEngine::CopyEngineChannel::restartStateMachine()
{
    switch(nextState) {
      case AddressFetch:
        fetchNextAddr(lastDescriptorAddr);
        break;
      case DescriptorFetch:
        fetchDescriptor(fetchAddress);
        break;
      case DMARead:
        readCopyBytes();
        break;
      case DMAWrite:
        writeCopyBytes();
        break;
      case CompletionWrite:
        writeCompletionStatus();
        break;
      case Idle:
        // Nothing was in flight.
        break;
      default:
        panic("Unknown state for CopyEngineChannel\n");
    }
}
756
757 void
758 CopyEngine::resume()
759 {
760 SimObject::resume();
761 for (int x = 0;x < chan.size(); x++)
762 chan[x]->resume();
763 }
764
765
// Per-channel resume hook: pick the state machine back up wherever the
// drain stopped it.
void
CopyEngine::CopyEngineChannel::resume()
{
    DPRINTF(DMACopyEngine, "Restarting state machine at state %d\n", nextState);
    restartStateMachine();
}
772
// Python-config factory entry point: instantiate the device from its
// generated parameter struct.
CopyEngine *
CopyEngineParams::create()
{
    return new CopyEngine(this);
}