// src/dev/copy_engine.cc
/*
 * Copyright (c) 2008 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's I/O AT DMA copy engine.
 */
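/*
 * Overview of the model:
 *  - The CopyEngine is a PCI device exposing a block of global MMIO
 *    registers followed by an 0x80-byte register window per DMA channel.
 *  - Each CopyEngineChannel walks a chain of DmaDesc descriptors in
 *    memory, copying data from a source to a destination address and
 *    optionally writing completion status back to memory.
 *  - Channel behaviour is driven by a small state machine (Idle,
 *    AddressFetch, DescriptorFetch, DMARead, DMAWrite, CompletionWrite)
 *    so a channel can be drained, checkpointed, and resumed cleanly.
 */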

#include <algorithm>

#include "base/cp_annotate.hh"
#include "base/trace.hh"
#include "dev/copy_engine.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/CopyEngine.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace CopyEngineReg;
using namespace std;

CopyEngine::CopyEngine(const Params *p)
    : PciDev(p)
{
    // All Reg regs are initialized to 0 by default
    regs.chanCount = p->ChanCnt;
    regs.xferCap = findMsbSet(p->XferCap);
    regs.attnStatus = 0;

    if (regs.chanCount > 64)
        fatal("CopyEngine interface doesn't support more than 64 DMA engines\n");

    for (int x = 0; x < regs.chanCount; x++) {
        CopyEngineChannel *ch = new CopyEngineChannel(this, x);
        chan.push_back(ch);
    }
}


CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine *_ce, int cid)
    : ce(_ce), channelId(cid), busy(false), underReset(false),
      refreshNext(false), latBeforeBegin(ce->params()->latBeforeBegin),
      latAfterCompletion(ce->params()->latAfterCompletion),
      completionDataReg(0), nextState(Idle), drainEvent(NULL),
      fetchCompleteEvent(this), addrCompleteEvent(this),
      readCompleteEvent(this), writeCompleteEvent(this),
      statusCompleteEvent(this)
{
    cr.status.dma_transfer_status(3);
    cr.descChainAddr = 0;
    cr.completionAddr = 0;

    curDmaDesc = new DmaDesc;
    memset(curDmaDesc, 0, sizeof(DmaDesc));
    copyBuffer = new uint8_t[ce->params()->XferCap];
}

CopyEngine::~CopyEngine()
{
    for (int x = 0; x < chan.size(); x++) {
        delete chan[x];
    }
}

CopyEngine::CopyEngineChannel::~CopyEngineChannel()
{
    delete curDmaDesc;
    delete [] copyBuffer;
    delete cePort;
}

void
CopyEngine::init()
{
    PciDev::init();
    for (int x = 0; x < chan.size(); x++)
        chan[x]->init();
}

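/*
 * Give each channel its own DmaPort.  The channel asks the object on the
 * far side of the device's main dmaPort for an additional port and
 * cross-connects it with the new per-channel port, so channel DMA traffic
 * uses the same interconnect connection as the rest of the device.
 */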
void
CopyEngine::CopyEngineChannel::init()
{
    Port *peer;

    cePort = new DmaPort(ce, ce->sys);
    peer = ce->dmaPort->getPeer()->getOwner()->getPort("");
    peer->setPeer(cePort);
    cePort->setPeer(peer);
}
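/*
 * recvCommand() runs after software writes the per-channel command
 * register: start_dma begins descriptor processing, append_dma re-reads
 * the next pointer of the last descriptor (or is deferred if the channel
 * is busy), and reset_dma either halts immediately or is deferred until
 * the current copy finishes.  Resume, abort, and suspend are not modelled.
 */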

void
CopyEngine::CopyEngineChannel::recvCommand()
{
    if (cr.command.start_dma()) {
        assert(!busy);
        cr.status.dma_transfer_status(0);
        nextState = DescriptorFetch;
        fetchAddress = cr.descChainAddr;
        if (ce->getState() == SimObject::Running)
            fetchDescriptor(cr.descChainAddr);
    } else if (cr.command.append_dma()) {
        if (!busy) {
            nextState = AddressFetch;
            if (ce->getState() == SimObject::Running)
                fetchNextAddr(lastDescriptorAddr);
        } else
            refreshNext = true;
    } else if (cr.command.reset_dma()) {
        if (busy)
            underReset = true;
        else {
            cr.status.dma_transfer_status(3);
            nextState = Idle;
        }
    } else if (cr.command.resume_dma() || cr.command.abort_dma() ||
               cr.command.suspend_dma())
        panic("Resume, Abort, and Suspend are not supported\n");
    cr.command(0);
}
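/*
 * MMIO decode: the first 0x80 bytes of BAR0 hold the global registers
 * (channel count, transfer capability, interrupt control, attention
 * status); each following 0x80-byte window belongs to one channel and is
 * forwarded to channelRead()/channelWrite().
 */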

Tick
CopyEngine::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();
    if (size != sizeof(uint64_t) && size != sizeof(uint32_t) &&
        size != sizeof(uint16_t) && size != sizeof(uint8_t)) {
        panic("Unknown size for MMIO access: %d\n", pkt->getSize());
    }

    DPRINTF(DMACopyEngine, "Read device register %#X size: %d\n", daddr, size);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///

    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
            assert(size == sizeof(regs.chanCount));
            pkt->set<uint8_t>(regs.chanCount);
            break;
          case GEN_XFERCAP:
            assert(size == sizeof(regs.xferCap));
            pkt->set<uint8_t>(regs.xferCap);
            break;
          case GEN_INTRCTRL:
            assert(size == sizeof(uint8_t));
            pkt->set<uint8_t>(regs.intrctrl());
            regs.intrctrl.master_int_enable(0);
            break;
          case GEN_ATTNSTATUS:
            assert(size == sizeof(regs.attnStatus));
            pkt->set<uint32_t>(regs.attnStatus);
            regs.attnStatus = 0;
            break;
          default:
            panic("Read request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }


    // Find which channel we're accessing
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelRead(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
CopyEngine::CopyEngineChannel::channelRead(Packet *pkt, Addr daddr, int size)
{
    switch (daddr) {
      case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
        pkt->set<uint16_t>(cr.ctrl());
        cr.ctrl.in_use(1);
        break;
      case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        pkt->set<uint64_t>(cr.status() | ~busy);
        break;
      case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            pkt->set<uint64_t>(cr.descChainAddr);
        else
            pkt->set<uint32_t>(bits(cr.descChainAddr,0,31));
        break;
      case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(bits(cr.descChainAddr,32,63));
        break;
      case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        pkt->set<uint8_t>(cr.command());
        break;
      case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            pkt->set<uint64_t>(cr.completionAddr);
        else
            pkt->set<uint32_t>(bits(cr.completionAddr,0,31));
        break;
      case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(bits(cr.completionAddr,32,63));
        break;
      case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(cr.error());
        break;
      default:
        panic("Read request to unknown channel register number: (%d)%#x\n",
              channelId, daddr);
    }
}


Tick
CopyEngine::write(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();

    ///
    /// Handle write of register here
    ///

    if (size == sizeof(uint64_t)) {
        uint64_t val M5_VAR_USED = pkt->get<uint64_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint32_t)) {
        uint32_t val M5_VAR_USED = pkt->get<uint32_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint16_t)) {
        uint16_t val M5_VAR_USED = pkt->get<uint16_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint8_t)) {
        uint8_t val M5_VAR_USED = pkt->get<uint8_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else {
        panic("Unknown size for MMIO access: %d\n", size);
    }

    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
          case GEN_XFERCAP:
          case GEN_ATTNSTATUS:
            DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
                    daddr);
            break;
          case GEN_INTRCTRL:
            regs.intrctrl.master_int_enable(bits(pkt->get<uint8_t>(),0,1));
            break;
          default:
            panic("Write request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }

    // Find which channel we're accessing
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelWrite(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}

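/*
 * channelWrite() updates the per-channel registers.  The control
 * register's interrupt_disable field is modelled here as
 * write-one-to-clear: writing a 1 clears it, otherwise the previous
 * value is preserved.  Writing the command register triggers
 * recvCommand().
 */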
void
CopyEngine::CopyEngineChannel::channelWrite(Packet *pkt, Addr daddr, int size)
{
    switch (daddr) {
      case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
        int old_int_disable;
        old_int_disable = cr.ctrl.interrupt_disable();
        cr.ctrl(pkt->get<uint16_t>());
        if (cr.ctrl.interrupt_disable())
            cr.ctrl.interrupt_disable(0);
        else
            cr.ctrl.interrupt_disable(old_int_disable);
        break;
      case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
                daddr);
        break;
      case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            cr.descChainAddr = pkt->get<uint64_t>();
        else
            cr.descChainAddr = (uint64_t)pkt->get<uint32_t>() |
                (cr.descChainAddr & ~mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
      case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.descChainAddr = ((uint64_t)pkt->get<uint32_t>() << 32) |
            (cr.descChainAddr & mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
      case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        cr.command(pkt->get<uint8_t>());
        recvCommand();
        break;
      case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            cr.completionAddr = pkt->get<uint64_t>();
        else
            cr.completionAddr = pkt->get<uint32_t>() |
                (cr.completionAddr & ~mask(32));
        break;
      case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.completionAddr = ((uint64_t)pkt->get<uint32_t>() << 32) |
            (cr.completionAddr & mask(32));
        break;
      case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        cr.error(~pkt->get<uint32_t>() & cr.error());
        break;
      default:
        panic("Write request to unknown channel register number: (%d)%#x\n",
              channelId, daddr);
    }
}

void
CopyEngine::regStats()
{
    using namespace Stats;
    bytesCopied
        .init(regs.chanCount)
        .name(name() + ".bytes_copied")
        .desc("Number of bytes copied by each engine")
        .flags(total)
        ;
    copiesProcessed
        .init(regs.chanCount)
        .name(name() + ".copies_processed")
        .desc("Number of copies processed by each engine")
        .flags(total)
        ;
}
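/*
 * Copy flow for one descriptor:
 *   fetchDescriptor()       - DMA the DmaDesc in from memory
 *   fetchDescComplete()     - handle NULL/status-only descriptors
 *   readCopyBytes()         - DMA the source data into copyBuffer
 *   writeCopyBytes()        - DMA copyBuffer out to the destination
 *   writeCompletionStatus() - optionally write status back to memory
 *   continueProcessing()    - follow the chain, service an append,
 *                             honour a pending reset, or go idle
 * Each step checks inDrain() before issuing the next DMA so the channel
 * can pause cleanly while the simulator drains.
 */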

void
CopyEngine::CopyEngineChannel::fetchDescriptor(Addr address)
{
    anDq();
    anBegin("FetchDescriptor");
    DPRINTF(DMACopyEngine, "Reading descriptor from memory location %#x(%#x)\n",
            address, ce->platform->pciToDma(address));
    assert(address);
    busy = true;

    DPRINTF(DMACopyEngine, "dmaAction: %#x, %d bytes, to addr %#x\n",
            ce->platform->pciToDma(address), sizeof(DmaDesc), curDmaDesc);

    cePort->dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(address),
                      sizeof(DmaDesc), &fetchCompleteEvent,
                      (uint8_t*)curDmaDesc, latBeforeBegin);
    lastDescriptorAddr = address;
}

void
CopyEngine::CopyEngineChannel::fetchDescComplete()
{
    DPRINTF(DMACopyEngine, "Read of descriptor complete\n");

    if ((curDmaDesc->command & DESC_CTRL_NULL)) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, skipping\n");
        assert(!(curDmaDesc->command & DESC_CTRL_CP_STS));
        if (curDmaDesc->command & DESC_CTRL_CP_STS) {
            panic("Shouldn't be able to get here\n");
            nextState = CompletionWrite;
            if (inDrain()) return;
            writeCompletionStatus();
        } else {
            anBegin("Idle");
            anWait();
            busy = false;
            nextState = Idle;
            inDrain();
        }
        return;
    }

    if (curDmaDesc->command & ~DESC_CTRL_CP_STS)
        panic("Descriptor has a flag other than completion status set\n");

    nextState = DMARead;
    if (inDrain()) return;
    readCopyBytes();
}

void
CopyEngine::CopyEngineChannel::readCopyBytes()
{
    anBegin("ReadCopyBytes");
    DPRINTF(DMACopyEngine, "Reading %d bytes from memory location %#x(%#x) to buffer\n",
            curDmaDesc->len, curDmaDesc->src,
            ce->platform->pciToDma(curDmaDesc->src));
    cePort->dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(curDmaDesc->src),
                      curDmaDesc->len, &readCompleteEvent, copyBuffer, 0);
}

void
CopyEngine::CopyEngineChannel::readCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Read of bytes to copy complete\n");

    nextState = DMAWrite;
    if (inDrain()) return;
    writeCopyBytes();
}

void
CopyEngine::CopyEngineChannel::writeCopyBytes()
{
    anBegin("WriteCopyBytes");
    DPRINTF(DMACopyEngine, "Writing %d bytes from buffer to memory location %#x(%#x)\n",
            curDmaDesc->len, curDmaDesc->dest,
            ce->platform->pciToDma(curDmaDesc->dest));

    cePort->dmaAction(MemCmd::WriteReq, ce->platform->pciToDma(curDmaDesc->dest),
                      curDmaDesc->len, &writeCompleteEvent, copyBuffer, 0);

    ce->bytesCopied[channelId] += curDmaDesc->len;
    ce->copiesProcessed[channelId]++;
}

void
CopyEngine::CopyEngineChannel::writeCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Write of bytes to copy complete user1: %#x\n",
            curDmaDesc->user1);

    cr.status.compl_desc_addr(lastDescriptorAddr >> 6);
    completionDataReg = cr.status() | 1;

    anQ("DMAUsedDescQ", channelId, 1);
    anQ("AppRecvQ", curDmaDesc->user1, curDmaDesc->len);
    if (curDmaDesc->command & DESC_CTRL_CP_STS) {
        nextState = CompletionWrite;
        if (inDrain()) return;
        writeCompletionStatus();
        return;
    }

    continueProcessing();
}
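/*
 * Decide what to do once the current descriptor is finished: a pending
 * reset wins, then descriptor chaining via the next pointer, then a
 * deferred append (refreshNext), otherwise the channel goes idle.
 */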

void
CopyEngine::CopyEngineChannel::continueProcessing()
{
    busy = false;

    if (underReset) {
        anBegin("Reset");
        anWait();
        underReset = false;
        refreshNext = false;
        busy = false;
        nextState = Idle;
        return;
    }

    if (curDmaDesc->next) {
        nextState = DescriptorFetch;
        fetchAddress = curDmaDesc->next;
        if (inDrain()) return;
        fetchDescriptor(curDmaDesc->next);
    } else if (refreshNext) {
        nextState = AddressFetch;
        refreshNext = false;
        if (inDrain()) return;
        fetchNextAddr(lastDescriptorAddr);
    } else {
        inDrain();
        nextState = Idle;
        anWait();
        anBegin("Idle");
    }
}

void
CopyEngine::CopyEngineChannel::writeCompletionStatus()
{
    anBegin("WriteCompletionStatus");
    DPRINTF(DMACopyEngine, "Writing completion status %#x to address %#x(%#x)\n",
            completionDataReg, cr.completionAddr,
            ce->platform->pciToDma(cr.completionAddr));

    cePort->dmaAction(MemCmd::WriteReq,
                      ce->platform->pciToDma(cr.completionAddr),
                      sizeof(completionDataReg), &statusCompleteEvent,
                      (uint8_t*)&completionDataReg, latAfterCompletion);
}

void
CopyEngine::CopyEngineChannel::writeStatusComplete()
{
    DPRINTF(DMACopyEngine, "Writing completion status complete\n");
    continueProcessing();
}

void
CopyEngine::CopyEngineChannel::fetchNextAddr(Addr address)
{
    anBegin("FetchNextAddr");
    DPRINTF(DMACopyEngine, "Fetching next address...\n");
    busy = true;
    cePort->dmaAction(MemCmd::ReadReq,
                      ce->platform->pciToDma(address + offsetof(DmaDesc, next)),
                      sizeof(Addr), &addrCompleteEvent,
                      (uint8_t*)curDmaDesc + offsetof(DmaDesc, next), 0);
}

void
CopyEngine::CopyEngineChannel::fetchAddrComplete()
{
    DPRINTF(DMACopyEngine, "Fetching next address complete: %#x\n",
            curDmaDesc->next);
    if (!curDmaDesc->next) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, nothing more to do\n");
        busy = false;
        nextState = Idle;
        anWait();
        anBegin("Idle");
        inDrain();
        return;
    }
    nextState = DescriptorFetch;
    fetchAddress = curDmaDesc->next;
    if (inDrain()) return;
    fetchDescriptor(curDmaDesc->next);
}
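/*
 * Draining: inDrain() is consulted at every state transition.  If the
 * simulator is draining it signals the recorded drain event and reports
 * that no further DMA should be issued; otherwise processing continues.
 * drain() registers the event and counts this channel (plus its port) as
 * outstanding work.
 */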

bool
CopyEngine::CopyEngineChannel::inDrain()
{
    if (ce->getState() == SimObject::Draining) {
        DPRINTF(DMACopyEngine, "processing drain\n");
        assert(drainEvent);
        drainEvent->process();
        drainEvent = NULL;
    }

    return ce->getState() != SimObject::Running;
}

unsigned int
CopyEngine::CopyEngineChannel::drain(Event *de)
{
    if (nextState == Idle || ce->getState() != SimObject::Running)
        return 0;
    unsigned int count = 1;
    count += cePort->drain(de);

    DPRINTF(DMACopyEngine, "unable to drain, returning %d\n", count);
    drainEvent = de;
    return count;
}

unsigned int
CopyEngine::drain(Event *de)
{
    unsigned int count;
    count = pioPort->drain(de) + dmaPort->drain(de) + configPort->drain(de);
    for (int x = 0; x < chan.size(); x++)
        count += chan[x]->drain(de);

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    DPRINTF(DMACopyEngine, "call to CopyEngine::drain() returning %d\n", count);
    return count;
}
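/*
 * Checkpointing: the device serializes its PCI state and global
 * registers, then each channel is written out under its own
 * "<name>.channelN" section, including the in-flight descriptor and copy
 * buffer so an interrupted copy can be resumed after a restore.
 */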

void
CopyEngine::serialize(std::ostream &os)
{
    PciDev::serialize(os);
    regs.serialize(os);
    for (int x = 0; x < chan.size(); x++) {
        nameOut(os, csprintf("%s.channel%d", name(), x));
        chan[x]->serialize(os);
    }
}

void
CopyEngine::unserialize(Checkpoint *cp, const std::string &section)
{
    PciDev::unserialize(cp, section);
    regs.unserialize(cp, section);
    for (int x = 0; x < chan.size(); x++)
        chan[x]->unserialize(cp, csprintf("%s.channel%d", section, x));
}

void
CopyEngine::CopyEngineChannel::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(channelId);
    SERIALIZE_SCALAR(busy);
    SERIALIZE_SCALAR(underReset);
    SERIALIZE_SCALAR(refreshNext);
    SERIALIZE_SCALAR(lastDescriptorAddr);
    SERIALIZE_SCALAR(completionDataReg);
    SERIALIZE_SCALAR(fetchAddress);
    int nextState = this->nextState;
    SERIALIZE_SCALAR(nextState);
    arrayParamOut(os, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    SERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.serialize(os);
}

void
CopyEngine::CopyEngineChannel::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(channelId);
    UNSERIALIZE_SCALAR(busy);
    UNSERIALIZE_SCALAR(underReset);
    UNSERIALIZE_SCALAR(refreshNext);
    UNSERIALIZE_SCALAR(lastDescriptorAddr);
    UNSERIALIZE_SCALAR(completionDataReg);
    UNSERIALIZE_SCALAR(fetchAddress);
    int nextState;
    UNSERIALIZE_SCALAR(nextState);
    this->nextState = (ChannelState)nextState;
    arrayParamIn(cp, section, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    UNSERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.unserialize(cp, section);
}

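/*
 * After a checkpoint restore (or un-pausing), restartStateMachine()
 * re-issues whatever operation the serialized nextState says was in
 * flight; resume() invokes it on every channel.
 */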
void
CopyEngine::CopyEngineChannel::restartStateMachine()
{
    switch (nextState) {
      case AddressFetch:
        fetchNextAddr(lastDescriptorAddr);
        break;
      case DescriptorFetch:
        fetchDescriptor(fetchAddress);
        break;
      case DMARead:
        readCopyBytes();
        break;
      case DMAWrite:
        writeCopyBytes();
        break;
      case CompletionWrite:
        writeCompletionStatus();
        break;
      case Idle:
        break;
      default:
        panic("Unknown state for CopyEngineChannel\n");
    }
}

void
CopyEngine::resume()
{
    SimObject::resume();
    for (int x = 0; x < chan.size(); x++)
        chan[x]->resume();
}


void
CopyEngine::CopyEngineChannel::resume()
{
    DPRINTF(DMACopyEngine, "Restarting state machine at state %d\n", nextState);
    restartStateMachine();
}

CopyEngine *
CopyEngineParams::create()
{
    return new CopyEngine(this);
}