DMA: Make DmaPort generic enough to be used other places
[gem5.git] / src / dev / copy_engine.cc
1 /*
2 * Copyright (c) 2008 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Ali Saidi
29 */
30
31 /* @file
32 * Device model for Intel's I/O AT DMA copy engine.
33 */
34
35 #include <algorithm>
36
37 #include "base/cp_annotate.hh"
38 #include "base/trace.hh"
39 #include "dev/copy_engine.hh"
40 #include "mem/packet.hh"
41 #include "mem/packet_access.hh"
42 #include "params/CopyEngine.hh"
43 #include "sim/stats.hh"
44 #include "sim/system.hh"
45
46 using namespace CopyEngineReg;
47 using namespace std;
48
49 CopyEngine::CopyEngine(const Params *p)
50 : PciDev(p)
51 {
52 // All Reg regs are initialized to 0 by default
53 regs.chanCount = p->ChanCnt;
54 regs.xferCap = findMsbSet(p->XferCap);
55 regs.attnStatus = 0;
56
57 if (regs.chanCount > 64)
58 fatal("CopyEngine interface doesn't support more than 64 DMA engines\n");
59
60 for (int x = 0; x < regs.chanCount; x++) {
61 CopyEngineChannel *ch = new CopyEngineChannel(this, x);
62 chan.push_back(ch);
63 }
64 }
65
66
CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine *_ce, int cid)
    : ce(_ce), channelId(cid), busy(false), underReset(false),
    refreshNext(false), latBeforeBegin(ce->params()->latBeforeBegin),
    latAfterCompletion(ce->params()->latAfterCompletion),
    completionDataReg(0), nextState(Idle), drainEvent(NULL),
    fetchCompleteEvent(this), addrCompleteEvent(this),
    readCompleteEvent(this), writeCompleteEvent(this),
    statusCompleteEvent(this)

{
    // dma_transfer_status reset value is 3 (presumably "halted" per the
    // I/OAT spec -- the same value a reset command writes below).
    cr.status.dma_transfer_status(3);
    cr.descChainAddr = 0;
    cr.completionAddr = 0;

    // Scratch descriptor that chain entries are DMA-read into.
    curDmaDesc = new DmaDesc;
    memset(curDmaDesc, 0, sizeof(DmaDesc));
    // Staging buffer sized for the largest single-descriptor transfer.
    copyBuffer = new uint8_t[ce->params()->XferCap];
}
85
86 CopyEngine::~CopyEngine()
87 {
88 for (int x = 0; x < chan.size(); x++) {
89 delete chan[x];
90 }
91 }
92
CopyEngine::CopyEngineChannel::~CopyEngineChannel()
{
    // Release the scratch descriptor and staging buffer from the
    // constructor, plus the DMA port allocated in init().
    delete curDmaDesc;
    delete [] copyBuffer;
    delete cePort;
}
99
100 void
101 CopyEngine::init()
102 {
103 PciDev::init();
104 for (int x = 0; x < chan.size(); x++)
105 chan[x]->init();
106 }
107
void
CopyEngine::CopyEngineChannel::init()
{
    Port *peer;

    // Give every channel its own DMA port so transfers from different
    // channels can be outstanding at the same time.
    cePort = new DmaPort(ce, ce->sys, ce->params()->min_backoff_delay,
                         ce->params()->max_backoff_delay);
    // Manually peer the new port next to the device's main dmaPort: ask
    // the object on the far side of dmaPort for another (unnamed) port
    // and wire the two together in both directions.
    peer = ce->dmaPort->getPeer()->getOwner()->getPort("");
    peer->setPeer(cePort);
    cePort->setPeer(peer);
}
119
void
CopyEngine::CopyEngineChannel::recvCommand()
{
    // Act on a freshly written command register. Only start, append and
    // reset are modeled; resume/abort/suspend panic.
    if (cr.command.start_dma()) {
        // Begin processing the descriptor chain at descChainAddr.
        assert(!busy);
        cr.status.dma_transfer_status(0);
        nextState = DescriptorFetch;
        fetchAddress = cr.descChainAddr;
        // Only start immediately while simulation is running; otherwise
        // resume()/restartStateMachine() picks nextState back up.
        if (ce->getState() == SimObject::Running)
            fetchDescriptor(cr.descChainAddr);
    } else if (cr.command.append_dma()) {
        if (!busy) {
            // Idle: re-read the last descriptor's next pointer to find
            // the newly appended work.
            nextState = AddressFetch;
            if (ce->getState() == SimObject::Running)
                fetchNextAddr(lastDescriptorAddr);
        } else
            // Busy: refresh the chain once the current copy finishes
            // (handled in continueProcessing()).
            refreshNext = true;
    } else if (cr.command.reset_dma()) {
        if (busy)
            // Defer the reset until the in-flight operation completes.
            underReset = true;
        else {
            cr.status.dma_transfer_status(3);
            nextState = Idle;
        }
    } else if (cr.command.resume_dma() || cr.command.abort_dma() ||
            cr.command.suspend_dma())
        panic("Resume, Abort, and Suspend are not supported\n");
    // The command register is self-clearing.
    cr.command(0);
}
149
Tick
CopyEngine::read(PacketPtr pkt)
{
    // PIO read handler: decode the BAR offset into either a device-global
    // register (offset < 0x80) or a per-channel register window.
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    // MMIO accesses must be 1, 2, 4 or 8 bytes wide.
    int size = pkt->getSize();
    if (size != sizeof(uint64_t) && size != sizeof(uint32_t) &&
        size != sizeof(uint16_t) && size != sizeof(uint8_t)) {
        panic("Unknown size for MMIO access: %d\n", pkt->getSize());
    }

    DPRINTF(DMACopyEngine, "Read device register %#X size: %d\n", daddr, size);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///

    // Device-global registers live below offset 0x80.
    if (daddr < 0x80) {
        switch (daddr) {
        case GEN_CHANCOUNT:
            assert(size == sizeof(regs.chanCount));
            pkt->set<uint8_t>(regs.chanCount);
            break;
        case GEN_XFERCAP:
            assert(size == sizeof(regs.xferCap));
            pkt->set<uint8_t>(regs.xferCap);
            break;
        case GEN_INTRCTRL:
            assert(size == sizeof(uint8_t));
            pkt->set<uint8_t>(regs.intrctrl());
            // Reading interrupt control clears the master enable bit.
            regs.intrctrl.master_int_enable(0);
            break;
        case GEN_ATTNSTATUS:
            assert(size == sizeof(regs.attnStatus));
            pkt->set<uint32_t>(regs.attnStatus);
            // Attention status is clear-on-read.
            regs.attnStatus = 0;
            break;
        default:
            panic("Read request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }


    // Find which channel we're accessing: each channel owns an
    // 0x80-byte register window starting at offset 0x80.
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
                chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelRead(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}
224
225 void
226 CopyEngine::CopyEngineChannel::channelRead(Packet *pkt, Addr daddr, int size)
227 {
228 switch (daddr) {
229 case CHAN_CONTROL:
230 assert(size == sizeof(uint16_t));
231 pkt->set<uint16_t>(cr.ctrl());
232 cr.ctrl.in_use(1);
233 break;
234 case CHAN_STATUS:
235 assert(size == sizeof(uint64_t));
236 pkt->set<uint64_t>(cr.status() | ~busy);
237 break;
238 case CHAN_CHAINADDR:
239 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
240 if (size == sizeof(uint64_t))
241 pkt->set<uint64_t>(cr.descChainAddr);
242 else
243 pkt->set<uint32_t>(bits(cr.descChainAddr,0,31));
244 break;
245 case CHAN_CHAINADDR_HIGH:
246 assert(size == sizeof(uint32_t));
247 pkt->set<uint32_t>(bits(cr.descChainAddr,32,63));
248 break;
249 case CHAN_COMMAND:
250 assert(size == sizeof(uint8_t));
251 pkt->set<uint32_t>(cr.command());
252 break;
253 case CHAN_CMPLNADDR:
254 assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
255 if (size == sizeof(uint64_t))
256 pkt->set<uint64_t>(cr.completionAddr);
257 else
258 pkt->set<uint32_t>(bits(cr.completionAddr,0,31));
259 break;
260 case CHAN_CMPLNADDR_HIGH:
261 assert(size == sizeof(uint32_t));
262 pkt->set<uint32_t>(bits(cr.completionAddr,32,63));
263 break;
264 case CHAN_ERROR:
265 assert(size == sizeof(uint32_t));
266 pkt->set<uint32_t>(cr.error());
267 break;
268 default:
269 panic("Read request to unknown channel register number: (%d)%#x\n",
270 channelId, daddr);
271 }
272 }
273
274
Tick
CopyEngine::write(PacketPtr pkt)
{
    // PIO write handler: mirror of read() -- device-global registers
    // below 0x80, per-channel windows above.
    int bar;
    Addr daddr;


    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();

    ///
    /// Handle write of register here
    ///

    // Trace the written value at whatever access width was used.
    if (size == sizeof(uint64_t)) {
        uint64_t val M5_VAR_USED = pkt->get<uint64_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint32_t)) {
        uint32_t val M5_VAR_USED = pkt->get<uint32_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint16_t)) {
        uint16_t val M5_VAR_USED = pkt->get<uint16_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint8_t)) {
        uint8_t val M5_VAR_USED = pkt->get<uint8_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else {
        panic("Unknown size for MMIO access: %d\n", size);
    }

    // Device-global registers live below offset 0x80.
    if (daddr < 0x80) {
        switch (daddr) {
        case GEN_CHANCOUNT:
        case GEN_XFERCAP:
        case GEN_ATTNSTATUS:
            // Read-only registers: drop the write with a warning.
            DPRINTF(DMACopyEngine, "Warning, ignorning write to register %x\n",
                    daddr);
            break;
        case GEN_INTRCTRL:
            regs.intrctrl.master_int_enable(bits(pkt->get<uint8_t>(),0,1));
            break;
        default:
            // NOTE(review): message says "Read request" in the write path.
            panic("Read request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }

    // Find which channel we're accessing: each channel owns an
    // 0x80-byte register window starting at offset 0x80.
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
                chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelWrite(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}
348
void
CopyEngine::CopyEngineChannel::channelWrite(Packet *pkt, Addr daddr, int size)
{
    // Decode a write to one of this channel's registers; daddr is the
    // offset within the channel's 0x80-byte register window.
    switch (daddr) {
    case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
        int old_int_disable;
        old_int_disable = cr.ctrl.interrupt_disable();
        cr.ctrl(pkt->get<uint16_t>());
        // Writing 1 to interrupt_disable clears it; writing 0 leaves the
        // previous value intact (write-one-to-clear semantics).
        if (cr.ctrl.interrupt_disable())
            cr.ctrl.interrupt_disable(0);
        else
            cr.ctrl.interrupt_disable(old_int_disable);
        break;
    case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        // Status register is read-only.
        DPRINTF(DMACopyEngine, "Warning, ignorning write to register %x\n",
                daddr);
        break;
    case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        // A 32-bit write replaces only the low half of the chain address.
        if (size == sizeof(uint64_t))
            cr.descChainAddr = pkt->get<uint64_t>();
        else
            cr.descChainAddr = (uint64_t)pkt->get<uint32_t>() |
                (cr.descChainAddr & ~mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
    case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        // Replace only the high half of the chain address.
        cr.descChainAddr = ((uint64_t)pkt->get<uint32_t>() <<32) |
            (cr.descChainAddr & mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
    case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        // Latch the command byte and act on it immediately.
        cr.command(pkt->get<uint8_t>());
        recvCommand();
        break;
    case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        // Same low/high split semantics as the chain address.
        if (size == sizeof(uint64_t))
            cr.completionAddr = pkt->get<uint64_t>();
        else
            cr.completionAddr = pkt->get<uint32_t>() |
                (cr.completionAddr & ~mask(32));
        break;
    case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.completionAddr = ((uint64_t)pkt->get<uint32_t>() <<32) |
            (cr.completionAddr & mask(32));
        break;
    case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        // Write-one-to-clear: bits set in the written value clear the
        // corresponding error bits.
        cr.error(~pkt->get<uint32_t>() & cr.error());
        break;
    default:
        // NOTE(review): message says "Read request" in the write path.
        panic("Read request to unknown channel register number: (%d)%#x\n",
                channelId, daddr);
    }
}
410
411 void
412 CopyEngine::regStats()
413 {
414 using namespace Stats;
415 bytesCopied
416 .init(regs.chanCount)
417 .name(name() + ".bytes_copied")
418 .desc("Number of bytes copied by each engine")
419 .flags(total)
420 ;
421 copiesProcessed
422 .init(regs.chanCount)
423 .name(name() + ".copies_processed")
424 .desc("Number of copies processed by each engine")
425 .flags(total)
426 ;
427 }
428
429 void
430 CopyEngine::CopyEngineChannel::fetchDescriptor(Addr address)
431 {
432 anDq();
433 anBegin("FetchDescriptor");
434 DPRINTF(DMACopyEngine, "Reading descriptor from at memory location %#x(%#x)\n",
435 address, ce->platform->pciToDma(address));
436 assert(address);
437 busy = true;
438
439 DPRINTF(DMACopyEngine, "dmaAction: %#x, %d bytes, to addr %#x\n",
440 ce->platform->pciToDma(address), sizeof(DmaDesc), curDmaDesc);
441
442 cePort->dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(address),
443 sizeof(DmaDesc), &fetchCompleteEvent, (uint8_t*)curDmaDesc,
444 latBeforeBegin);
445 lastDescriptorAddr = address;
446 }
447
void
CopyEngine::CopyEngineChannel::fetchDescComplete()
{
    // A descriptor has arrived in curDmaDesc; decide what to do with it.
    DPRINTF(DMACopyEngine, "Read of descriptor complete\n");

    if ((curDmaDesc->command & DESC_CTRL_NULL)) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, skipping\n");
        // NOTE(review): this assert makes the if-branch below unreachable
        // (the panic inside it confirms that); left exactly as written.
        assert(!(curDmaDesc->command & DESC_CTRL_CP_STS));
        if (curDmaDesc->command & DESC_CTRL_CP_STS) {
            panic("Shouldn't be able to get here\n");
            nextState = CompletionWrite;
            if (inDrain()) return;
            writeCompletionStatus();
        } else {
            // NULL descriptor with no status request: the chain is done,
            // go idle.
            anBegin("Idle");
            anWait();
            busy = false;
            nextState = Idle;
            inDrain();
        }
        return;
    }

    // Only the "write completion status" flag is modeled.
    if (curDmaDesc->command & ~DESC_CTRL_CP_STS)
        panic("Descriptor has flag other that completion status set\n");

    // Normal copy descriptor: read the source data next.
    nextState = DMARead;
    if (inDrain()) return;
    readCopyBytes();
}
478
479 void
480 CopyEngine::CopyEngineChannel::readCopyBytes()
481 {
482 anBegin("ReadCopyBytes");
483 DPRINTF(DMACopyEngine, "Reading %d bytes from buffer to memory location %#x(%#x)\n",
484 curDmaDesc->len, curDmaDesc->dest,
485 ce->platform->pciToDma(curDmaDesc->src));
486 cePort->dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(curDmaDesc->src),
487 curDmaDesc->len, &readCompleteEvent, copyBuffer, 0);
488 }
489
490 void
491 CopyEngine::CopyEngineChannel::readCopyBytesComplete()
492 {
493 DPRINTF(DMACopyEngine, "Read of bytes to copy complete\n");
494
495 nextState = DMAWrite;
496 if (inDrain()) return;
497 writeCopyBytes();
498 }
499
500 void
501 CopyEngine::CopyEngineChannel::writeCopyBytes()
502 {
503 anBegin("WriteCopyBytes");
504 DPRINTF(DMACopyEngine, "Writing %d bytes from buffer to memory location %#x(%#x)\n",
505 curDmaDesc->len, curDmaDesc->dest,
506 ce->platform->pciToDma(curDmaDesc->dest));
507
508 cePort->dmaAction(MemCmd::WriteReq, ce->platform->pciToDma(curDmaDesc->dest),
509 curDmaDesc->len, &writeCompleteEvent, copyBuffer, 0);
510
511 ce->bytesCopied[channelId] += curDmaDesc->len;
512 ce->copiesProcessed[channelId]++;
513 }
514
void
CopyEngine::CopyEngineChannel::writeCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Write of bytes to copy complete user1: %#x\n",
            curDmaDesc->user1);

    // Record the completed descriptor's address (shifted right 6 --
    // presumably reported in 64-byte units; confirm against the I/OAT
    // spec) and latch the value a completion-status write would store.
    cr.status.compl_desc_addr(lastDescriptorAddr >> 6);
    completionDataReg = cr.status() | 1;

    anQ("DMAUsedDescQ", channelId, 1);
    anQ("AppRecvQ", curDmaDesc->user1, curDmaDesc->len);
    if (curDmaDesc->command & DESC_CTRL_CP_STS) {
        // The descriptor asked for its completion status to be written
        // back to memory before continuing.
        nextState = CompletionWrite;
        if (inDrain()) return;
        writeCompletionStatus();
        return;
    }

    continueProcessing();
}
535
void
CopyEngine::CopyEngineChannel::continueProcessing()
{
    // A descriptor has fully completed; decide what the channel does next.
    busy = false;

    // A reset that arrived while we were busy takes effect now and also
    // cancels any pending chain refresh.
    if (underReset) {
        anBegin("Reset");
        anWait();
        underReset = false;
        refreshNext = false;
        busy = false;
        nextState = Idle;
        return;
    }

    if (curDmaDesc->next) {
        // Follow the chain to the next descriptor.
        nextState = DescriptorFetch;
        fetchAddress = curDmaDesc->next;
        if (inDrain()) return;
        fetchDescriptor(curDmaDesc->next);
    } else if (refreshNext) {
        // An append command came in while busy: re-read the last
        // descriptor's next pointer to pick up the new work.
        nextState = AddressFetch;
        refreshNext = false;
        if (inDrain()) return;
        fetchNextAddr(lastDescriptorAddr);
    } else {
        // End of chain with nothing appended: go idle.
        inDrain();
        nextState = Idle;
        anWait();
        anBegin("Idle");
    }
}
568
void
CopyEngine::CopyEngineChannel::writeCompletionStatus()
{
    anBegin("WriteCompletionStatus");
    DPRINTF(DMACopyEngine, "Writing completion status %#x to address %#x(%#x)\n",
            completionDataReg, cr.completionAddr,
            ce->platform->pciToDma(cr.completionAddr));

    // DMA the latched status value to the completion address;
    // latAfterCompletion models the extra delay the engine adds after a
    // copy before the status becomes visible.
    cePort->dmaAction(MemCmd::WriteReq, ce->platform->pciToDma(cr.completionAddr),
            sizeof(completionDataReg), &statusCompleteEvent,
            (uint8_t*)&completionDataReg, latAfterCompletion);
}
581
void
CopyEngine::CopyEngineChannel::writeStatusComplete()
{
    // The completion record has landed in memory; carry on with the
    // descriptor chain.
    DPRINTF(DMACopyEngine, "Writing completion status complete\n");
    continueProcessing();
}
588
void
CopyEngine::CopyEngineChannel::fetchNextAddr(Addr address)
{
    anBegin("FetchNextAddr");
    DPRINTF(DMACopyEngine, "Fetching next address...\n");
    busy = true;
    // Re-read only the 'next' field of the descriptor at 'address',
    // directly into curDmaDesc->next; fetchAddrComplete() fires when done.
    // NOTE(review): assumes sizeof(Addr) matches the width of the
    // descriptor's next field -- confirm against DmaDesc's declaration.
    cePort->dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(address +
                offsetof(DmaDesc, next)), sizeof(Addr), &addrCompleteEvent,
            (uint8_t*)curDmaDesc + offsetof(DmaDesc, next), 0);
}
599
void
CopyEngine::CopyEngineChannel::fetchAddrComplete()
{
    DPRINTF(DMACopyEngine, "Fetching next address complete: %#x\n",
            curDmaDesc->next);
    if (!curDmaDesc->next) {
        // Chain still ends in NULL: nothing was appended, go idle.
        DPRINTF(DMACopyEngine, "Got NULL descriptor, nothing more to do\n");
        busy = false;
        nextState = Idle;
        anWait();
        anBegin("Idle");
        inDrain();
        return;
    }
    // New work appeared at the end of the chain; fetch its descriptor.
    nextState = DescriptorFetch;
    fetchAddress = curDmaDesc->next;
    if (inDrain()) return;
    fetchDescriptor(curDmaDesc->next);
}
619
bool
CopyEngine::CopyEngineChannel::inDrain()
{
    // If a drain is in progress, signal the stored drain event (exactly
    // once -- hence clearing drainEvent) now that this channel has
    // reached a safe stopping point.
    if (ce->getState() == SimObject::Draining) {
        DPRINTF(DMACopyEngine, "processing drain\n");
        assert(drainEvent);
        drainEvent->process();
        drainEvent = NULL;
    }

    // True when the simulator is not running, i.e. the state machine
    // should stop making forward progress.
    return ce->getState() != SimObject::Running;
}
632
unsigned int
CopyEngine::CopyEngineChannel::drain(Event *de)
{
    // Idle (or simulation already stopped): nothing outstanding.
    if (nextState == Idle || ce->getState() != SimObject::Running)
        return 0;
    // Otherwise this channel itself counts as one outstanding activity,
    // plus whatever its DMA port still has in flight.
    unsigned int count = 1;
    count += cePort->drain(de);

    DPRINTF(DMACopyEngine, "unable to drain, returning %d\n", count);
    // Remember the event so inDrain() can signal it at the next safe point.
    drainEvent = de;
    return count;
}
645
646 unsigned int
647 CopyEngine::drain(Event *de)
648 {
649 unsigned int count;
650 count = pioPort->drain(de) + dmaPort->drain(de) + configPort->drain(de);
651 for (int x = 0;x < chan.size(); x++)
652 count += chan[x]->drain(de);
653
654 if (count)
655 changeState(Draining);
656 else
657 changeState(Drained);
658
659 DPRINTF(DMACopyEngine, "call to CopyEngine::drain() returning %d\n", count);
660 return count;
661 }
662
663 void
664 CopyEngine::serialize(std::ostream &os)
665 {
666 PciDev::serialize(os);
667 regs.serialize(os);
668 for (int x =0; x < chan.size(); x++) {
669 nameOut(os, csprintf("%s.channel%d", name(), x));
670 chan[x]->serialize(os);
671 }
672 }
673
674 void
675 CopyEngine::unserialize(Checkpoint *cp, const std::string &section)
676 {
677 PciDev::unserialize(cp, section);
678 regs.unserialize(cp, section);
679 for (int x = 0; x < chan.size(); x++)
680 chan[x]->unserialize(cp, csprintf("%s.channel%d", section, x));
681 }
682
void
CopyEngine::CopyEngineChannel::serialize(std::ostream &os)
{
    // Checkpoint everything needed to restart the state machine exactly
    // where it left off (see unserialize()/restartStateMachine()).
    SERIALIZE_SCALAR(channelId);
    SERIALIZE_SCALAR(busy);
    SERIALIZE_SCALAR(underReset);
    SERIALIZE_SCALAR(refreshNext);
    SERIALIZE_SCALAR(lastDescriptorAddr);
    SERIALIZE_SCALAR(completionDataReg);
    SERIALIZE_SCALAR(fetchAddress);
    // Store the state enum as a plain int in the checkpoint.
    int nextState = this->nextState;
    SERIALIZE_SCALAR(nextState);
    // Raw byte images of the in-flight descriptor and staging buffer.
    arrayParamOut(os, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    SERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.serialize(os);

}
void
CopyEngine::CopyEngineChannel::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore everything serialize() saved; resume() restarts the state
    // machine from the recovered nextState.
    UNSERIALIZE_SCALAR(channelId);
    UNSERIALIZE_SCALAR(busy);
    UNSERIALIZE_SCALAR(underReset);
    UNSERIALIZE_SCALAR(refreshNext);
    UNSERIALIZE_SCALAR(lastDescriptorAddr);
    UNSERIALIZE_SCALAR(completionDataReg);
    UNSERIALIZE_SCALAR(fetchAddress);
    // The state enum was checkpointed as a plain int; convert it back.
    int nextState;
    UNSERIALIZE_SCALAR(nextState);
    this->nextState = (ChannelState)nextState;
    // Raw byte images of the in-flight descriptor and staging buffer.
    arrayParamIn(cp, section, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    UNSERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.unserialize(cp, section);

}
718
719 void
720 CopyEngine::CopyEngineChannel::restartStateMachine()
721 {
722 switch(nextState) {
723 case AddressFetch:
724 fetchNextAddr(lastDescriptorAddr);
725 break;
726 case DescriptorFetch:
727 fetchDescriptor(fetchAddress);
728 break;
729 case DMARead:
730 readCopyBytes();
731 break;
732 case DMAWrite:
733 writeCopyBytes();
734 break;
735 case CompletionWrite:
736 writeCompletionStatus();
737 break;
738 case Idle:
739 break;
740 default:
741 panic("Unknown state for CopyEngineChannel\n");
742 }
743 }
744
745 void
746 CopyEngine::resume()
747 {
748 SimObject::resume();
749 for (int x = 0;x < chan.size(); x++)
750 chan[x]->resume();
751 }
752
753
void
CopyEngine::CopyEngineChannel::resume()
{
    // Pick the state machine back up from the state restored by
    // unserialize() (or where a drain stopped it).
    DPRINTF(DMACopyEngine, "Restarting state machine at state %d\n", nextState);
    restartStateMachine();
}
760
CopyEngine *
CopyEngineParams::create()
{
    // Factory hook invoked by the Python configuration system.
    return new CopyEngine(this);
}