/*
 * Copyright (c) 2008 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Ali Saidi
 */

/* @file
 * Device model for Intel's I/O AT DMA copy engine.
 */
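
/*
 * Overview of the implementation below: the device exposes a single
 * memory-mapped PCI BAR.  Offsets below 0x80 hold the global registers;
 * each channel then owns its own 0x80-byte register window.  A channel
 * runs a small state machine: fetch a DmaDesc descriptor, DMA-read the
 * source data into copyBuffer, DMA-write it to the destination,
 * optionally write completion status, and then follow the descriptor's
 * next pointer (or a newly appended chain address) until a null
 * descriptor is reached.
 */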

#include <algorithm>

#include "base/cp_annotate.hh"
#include "base/trace.hh"
#include "debug/DMACopyEngine.hh"
#include "dev/copy_engine.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/CopyEngine.hh"
#include "sim/stats.hh"
#include "sim/system.hh"

using namespace CopyEngineReg;
using namespace std;

CopyEngine::CopyEngine(const Params *p)
    : PciDev(p)
{
    // All registers are initialized to 0 by default
    regs.chanCount = p->ChanCnt;
    regs.xferCap = findMsbSet(p->XferCap);
    regs.attnStatus = 0;

    if (regs.chanCount > 64)
        fatal("CopyEngine interface doesn't support more than 64 DMA engines\n");

    for (int x = 0; x < regs.chanCount; x++) {
        CopyEngineChannel *ch = new CopyEngineChannel(this, x);
        chan.push_back(ch);
    }
}


CopyEngine::CopyEngineChannel::CopyEngineChannel(CopyEngine *_ce, int cid)
    : ce(_ce), channelId(cid), busy(false), underReset(false),
      refreshNext(false), latBeforeBegin(ce->params()->latBeforeBegin),
      latAfterCompletion(ce->params()->latAfterCompletion),
      completionDataReg(0), nextState(Idle), drainEvent(NULL),
      fetchCompleteEvent(this), addrCompleteEvent(this),
      readCompleteEvent(this), writeCompleteEvent(this),
      statusCompleteEvent(this)
{
    cr.status.dma_transfer_status(3);
    cr.descChainAddr = 0;
    cr.completionAddr = 0;

    curDmaDesc = new DmaDesc;
    memset(curDmaDesc, 0, sizeof(DmaDesc));
    copyBuffer = new uint8_t[ce->params()->XferCap];
}

CopyEngine::~CopyEngine()
{
    for (int x = 0; x < chan.size(); x++) {
        delete chan[x];
    }
}

CopyEngine::CopyEngineChannel::~CopyEngineChannel()
{
    delete curDmaDesc;
    delete [] copyBuffer;
    delete cePort;
}

void
CopyEngine::init()
{
    PciDev::init();
    for (int x = 0; x < chan.size(); x++)
        chan[x]->init();
}

void
CopyEngine::CopyEngineChannel::init()
{
    Port *peer;

    cePort = new DmaPort(ce, ce->sys, ce->params()->min_backoff_delay,
                         ce->params()->max_backoff_delay);
    peer = ce->dmaPort->getPeer()->getOwner()->getPort("");
    peer->setPeer(cePort);
    cePort->setPeer(peer);
}

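/*
 * Decode a write to the per-channel command register: start_dma begins
 * processing descriptors at descChainAddr, append_dma re-reads the next
 * pointer of the last descriptor (deferred via refreshNext if the channel
 * is busy), and reset_dma either resets immediately or defers until the
 * current transfer completes.  Resume, abort and suspend are unimplemented.
 */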
void
CopyEngine::CopyEngineChannel::recvCommand()
{
    if (cr.command.start_dma()) {
        assert(!busy);
        cr.status.dma_transfer_status(0);
        nextState = DescriptorFetch;
        fetchAddress = cr.descChainAddr;
        if (ce->getState() == SimObject::Running)
            fetchDescriptor(cr.descChainAddr);
    } else if (cr.command.append_dma()) {
        if (!busy) {
            nextState = AddressFetch;
            if (ce->getState() == SimObject::Running)
                fetchNextAddr(lastDescriptorAddr);
        } else
            refreshNext = true;
    } else if (cr.command.reset_dma()) {
        if (busy)
            underReset = true;
        else {
            cr.status.dma_transfer_status(3);
            nextState = Idle;
        }
    } else if (cr.command.resume_dma() || cr.command.abort_dma() ||
               cr.command.suspend_dma())
        panic("Resume, Abort, and Suspend are not supported\n");
    cr.command(0);
}

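/*
 * MMIO read handler.  Offsets below 0x80 select the global registers;
 * higher offsets are split into 0x80-byte windows, one per channel, and
 * the access is forwarded to that channel's channelRead().
 */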
Tick
CopyEngine::read(PacketPtr pkt)
{
    int bar;
    Addr daddr;

    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();
    if (size != sizeof(uint64_t) && size != sizeof(uint32_t) &&
        size != sizeof(uint16_t) && size != sizeof(uint8_t)) {
        panic("Unknown size for MMIO access: %d\n", pkt->getSize());
    }

    DPRINTF(DMACopyEngine, "Read device register %#X size: %d\n", daddr, size);

    pkt->allocate();

    ///
    /// Handle read of register here
    ///

    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
            assert(size == sizeof(regs.chanCount));
            pkt->set<uint8_t>(regs.chanCount);
            break;
          case GEN_XFERCAP:
            assert(size == sizeof(regs.xferCap));
            pkt->set<uint8_t>(regs.xferCap);
            break;
          case GEN_INTRCTRL:
            assert(size == sizeof(uint8_t));
            pkt->set<uint8_t>(regs.intrctrl());
            regs.intrctrl.master_int_enable(0);
            break;
          case GEN_ATTNSTATUS:
            assert(size == sizeof(regs.attnStatus));
            pkt->set<uint32_t>(regs.attnStatus);
            regs.attnStatus = 0;
            break;
          default:
            panic("Read request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }


    // Find which channel we're accessing
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelRead(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
CopyEngine::CopyEngineChannel::channelRead(Packet *pkt, Addr daddr, int size)
{
    switch (daddr) {
      case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
        pkt->set<uint16_t>(cr.ctrl());
        cr.ctrl.in_use(1);
        break;
      case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        pkt->set<uint64_t>(cr.status() | ~busy);
        break;
      case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            pkt->set<uint64_t>(cr.descChainAddr);
        else
            pkt->set<uint32_t>(bits(cr.descChainAddr,0,31));
        break;
      case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(bits(cr.descChainAddr,32,63));
        break;
      case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        pkt->set<uint8_t>(cr.command());
        break;
      case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            pkt->set<uint64_t>(cr.completionAddr);
        else
            pkt->set<uint32_t>(bits(cr.completionAddr,0,31));
        break;
      case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(bits(cr.completionAddr,32,63));
        break;
      case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        pkt->set<uint32_t>(cr.error());
        break;
      default:
        panic("Read request to unknown channel register number: (%d)%#x\n",
              channelId, daddr);
    }
}

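/*
 * MMIO write handler; the address is decoded exactly as in read() above,
 * with writes to a channel window forwarded to channelWrite().
 */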
Tick
CopyEngine::write(PacketPtr pkt)
{
    int bar;
    Addr daddr;


    if (!getBAR(pkt->getAddr(), bar, daddr))
        panic("Invalid PCI memory access to unmapped memory.\n");

    // Only Memory register BAR is allowed
    assert(bar == 0);

    int size = pkt->getSize();

    ///
    /// Handle write of register here
    ///

    if (size == sizeof(uint64_t)) {
        uint64_t val M5_VAR_USED = pkt->get<uint64_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint32_t)) {
        uint32_t val M5_VAR_USED = pkt->get<uint32_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint16_t)) {
        uint16_t val M5_VAR_USED = pkt->get<uint16_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else if (size == sizeof(uint8_t)) {
        uint8_t val M5_VAR_USED = pkt->get<uint8_t>();
        DPRINTF(DMACopyEngine, "Wrote device register %#X value %#X\n", daddr, val);
    } else {
        panic("Unknown size for MMIO access: %d\n", size);
    }

    if (daddr < 0x80) {
        switch (daddr) {
          case GEN_CHANCOUNT:
          case GEN_XFERCAP:
          case GEN_ATTNSTATUS:
            DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
                    daddr);
            break;
          case GEN_INTRCTRL:
            regs.intrctrl.master_int_enable(bits(pkt->get<uint8_t>(),0,1));
            break;
          default:
            panic("Write request to unknown register number: %#x\n", daddr);
        }
        pkt->makeAtomicResponse();
        return pioDelay;
    }

    // Find which channel we're accessing
    int chanid = 0;
    daddr -= 0x80;
    while (daddr >= 0x80) {
        chanid++;
        daddr -= 0x80;
    }

    if (chanid >= regs.chanCount)
        panic("Access to channel %d (device only configured for %d channels)",
              chanid, regs.chanCount);

    ///
    /// Channel registers are handled here
    ///
    chan[chanid]->channelWrite(pkt, daddr, size);

    pkt->makeAtomicResponse();
    return pioDelay;
}

void
CopyEngine::CopyEngineChannel::channelWrite(Packet *pkt, Addr daddr, int size)
{
    switch (daddr) {
      case CHAN_CONTROL:
        assert(size == sizeof(uint16_t));
        int old_int_disable;
        old_int_disable = cr.ctrl.interrupt_disable();
        cr.ctrl(pkt->get<uint16_t>());
        if (cr.ctrl.interrupt_disable())
            cr.ctrl.interrupt_disable(0);
        else
            cr.ctrl.interrupt_disable(old_int_disable);
        break;
      case CHAN_STATUS:
        assert(size == sizeof(uint64_t));
        DPRINTF(DMACopyEngine, "Warning, ignoring write to register %x\n",
                daddr);
        break;
      case CHAN_CHAINADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            cr.descChainAddr = pkt->get<uint64_t>();
        else
            cr.descChainAddr = (uint64_t)pkt->get<uint32_t>() |
                (cr.descChainAddr & ~mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
      case CHAN_CHAINADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.descChainAddr = ((uint64_t)pkt->get<uint32_t>() << 32) |
            (cr.descChainAddr & mask(32));
        DPRINTF(DMACopyEngine, "Chain Address %x\n", cr.descChainAddr);
        break;
      case CHAN_COMMAND:
        assert(size == sizeof(uint8_t));
        cr.command(pkt->get<uint8_t>());
        recvCommand();
        break;
      case CHAN_CMPLNADDR:
        assert(size == sizeof(uint64_t) || size == sizeof(uint32_t));
        if (size == sizeof(uint64_t))
            cr.completionAddr = pkt->get<uint64_t>();
        else
            cr.completionAddr = pkt->get<uint32_t>() |
                (cr.completionAddr & ~mask(32));
        break;
      case CHAN_CMPLNADDR_HIGH:
        assert(size == sizeof(uint32_t));
        cr.completionAddr = ((uint64_t)pkt->get<uint32_t>() << 32) |
            (cr.completionAddr & mask(32));
        break;
      case CHAN_ERROR:
        assert(size == sizeof(uint32_t));
        cr.error(~pkt->get<uint32_t>() & cr.error());
        break;
      default:
        panic("Write request to unknown channel register number: (%d)%#x\n",
              channelId, daddr);
    }
}

void
CopyEngine::regStats()
{
    using namespace Stats;
    bytesCopied
        .init(regs.chanCount)
        .name(name() + ".bytes_copied")
        .desc("Number of bytes copied by each engine")
        .flags(total)
        ;
    copiesProcessed
        .init(regs.chanCount)
        .name(name() + ".copies_processed")
        .desc("Number of copies processed by each engine")
        .flags(total)
        ;
}

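/*
 * Per-descriptor processing chain: fetchDescriptor() DMA-reads a DmaDesc
 * from memory, fetchDescComplete() then either idles on a null descriptor
 * or kicks off readCopyBytes() -> writeCopyBytes() -> (optionally)
 * writeCompletionStatus(), and continueProcessing() walks on to the next
 * descriptor in the chain.
 */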
void
CopyEngine::CopyEngineChannel::fetchDescriptor(Addr address)
{
    anDq();
    anBegin("FetchDescriptor");
    DPRINTF(DMACopyEngine, "Reading descriptor from memory location %#x(%#x)\n",
            address, ce->platform->pciToDma(address));
    assert(address);
    busy = true;

    DPRINTF(DMACopyEngine, "dmaAction: %#x, %d bytes, to addr %#x\n",
            ce->platform->pciToDma(address), sizeof(DmaDesc), curDmaDesc);

    cePort->dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(address),
                      sizeof(DmaDesc), &fetchCompleteEvent, (uint8_t*)curDmaDesc,
                      latBeforeBegin);
    lastDescriptorAddr = address;
}

void
CopyEngine::CopyEngineChannel::fetchDescComplete()
{
    DPRINTF(DMACopyEngine, "Read of descriptor complete\n");

    if ((curDmaDesc->command & DESC_CTRL_NULL)) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, skipping\n");
        assert(!(curDmaDesc->command & DESC_CTRL_CP_STS));
        if (curDmaDesc->command & DESC_CTRL_CP_STS) {
            panic("Shouldn't be able to get here\n");
            nextState = CompletionWrite;
            if (inDrain()) return;
            writeCompletionStatus();
        } else {
            anBegin("Idle");
            anWait();
            busy = false;
            nextState = Idle;
            inDrain();
        }
        return;
    }

    if (curDmaDesc->command & ~DESC_CTRL_CP_STS)
        panic("Descriptor has a flag other than completion status set\n");

    nextState = DMARead;
    if (inDrain()) return;
    readCopyBytes();
}

void
CopyEngine::CopyEngineChannel::readCopyBytes()
{
    anBegin("ReadCopyBytes");
    DPRINTF(DMACopyEngine, "Reading %d bytes from memory location %#x(%#x) into copy buffer\n",
            curDmaDesc->len, curDmaDesc->src,
            ce->platform->pciToDma(curDmaDesc->src));
    cePort->dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(curDmaDesc->src),
                      curDmaDesc->len, &readCompleteEvent, copyBuffer, 0);
}

void
CopyEngine::CopyEngineChannel::readCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Read of bytes to copy complete\n");

    nextState = DMAWrite;
    if (inDrain()) return;
    writeCopyBytes();
}

void
CopyEngine::CopyEngineChannel::writeCopyBytes()
{
    anBegin("WriteCopyBytes");
    DPRINTF(DMACopyEngine, "Writing %d bytes from buffer to memory location %#x(%#x)\n",
            curDmaDesc->len, curDmaDesc->dest,
            ce->platform->pciToDma(curDmaDesc->dest));

    cePort->dmaAction(MemCmd::WriteReq, ce->platform->pciToDma(curDmaDesc->dest),
                      curDmaDesc->len, &writeCompleteEvent, copyBuffer, 0);

    ce->bytesCopied[channelId] += curDmaDesc->len;
    ce->copiesProcessed[channelId]++;
}

void
CopyEngine::CopyEngineChannel::writeCopyBytesComplete()
{
    DPRINTF(DMACopyEngine, "Write of bytes to copy complete user1: %#x\n",
            curDmaDesc->user1);

    cr.status.compl_desc_addr(lastDescriptorAddr >> 6);
    completionDataReg = cr.status() | 1;

    anQ("DMAUsedDescQ", channelId, 1);
    anQ("AppRecvQ", curDmaDesc->user1, curDmaDesc->len);
    if (curDmaDesc->command & DESC_CTRL_CP_STS) {
        nextState = CompletionWrite;
        if (inDrain()) return;
        writeCompletionStatus();
        return;
    }

    continueProcessing();
}

void
CopyEngine::CopyEngineChannel::continueProcessing()
{
    busy = false;

    if (underReset) {
        anBegin("Reset");
        anWait();
        underReset = false;
        refreshNext = false;
        busy = false;
        nextState = Idle;
        return;
    }

    if (curDmaDesc->next) {
        nextState = DescriptorFetch;
        fetchAddress = curDmaDesc->next;
        if (inDrain()) return;
        fetchDescriptor(curDmaDesc->next);
    } else if (refreshNext) {
        nextState = AddressFetch;
        refreshNext = false;
        if (inDrain()) return;
        fetchNextAddr(lastDescriptorAddr);
    } else {
        inDrain();
        nextState = Idle;
        anWait();
        anBegin("Idle");
    }
}

void
CopyEngine::CopyEngineChannel::writeCompletionStatus()
{
    anBegin("WriteCompletionStatus");
    DPRINTF(DMACopyEngine, "Writing completion status %#x to address %#x(%#x)\n",
            completionDataReg, cr.completionAddr,
            ce->platform->pciToDma(cr.completionAddr));

    cePort->dmaAction(MemCmd::WriteReq, ce->platform->pciToDma(cr.completionAddr),
                      sizeof(completionDataReg), &statusCompleteEvent,
                      (uint8_t*)&completionDataReg, latAfterCompletion);
}

void
CopyEngine::CopyEngineChannel::writeStatusComplete()
{
    DPRINTF(DMACopyEngine, "Writing completion status complete\n");
    continueProcessing();
}

void
CopyEngine::CopyEngineChannel::fetchNextAddr(Addr address)
{
    anBegin("FetchNextAddr");
    DPRINTF(DMACopyEngine, "Fetching next address...\n");
    busy = true;
    cePort->dmaAction(MemCmd::ReadReq, ce->platform->pciToDma(address +
                offsetof(DmaDesc, next)), sizeof(Addr), &addrCompleteEvent,
            (uint8_t*)curDmaDesc + offsetof(DmaDesc, next), 0);
}

void
CopyEngine::CopyEngineChannel::fetchAddrComplete()
{
    DPRINTF(DMACopyEngine, "Fetching next address complete: %#x\n",
            curDmaDesc->next);
    if (!curDmaDesc->next) {
        DPRINTF(DMACopyEngine, "Got NULL descriptor, nothing more to do\n");
        busy = false;
        nextState = Idle;
        anWait();
        anBegin("Idle");
        inDrain();
        return;
    }
    nextState = DescriptorFetch;
    fetchAddress = curDmaDesc->next;
    if (inDrain()) return;
    fetchDescriptor(curDmaDesc->next);
}

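/*
 * Draining support: inDrain() is checked between state transitions; when
 * the simulator is draining it signals the pending drain event and the
 * state machine stops advancing.  resume() later restarts the machine
 * from nextState via restartStateMachine().
 */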
bool
CopyEngine::CopyEngineChannel::inDrain()
{
    if (ce->getState() == SimObject::Draining) {
        DPRINTF(DMACopyEngine, "processing drain\n");
        assert(drainEvent);
        drainEvent->process();
        drainEvent = NULL;
    }

    return ce->getState() != SimObject::Running;
}

unsigned int
CopyEngine::CopyEngineChannel::drain(Event *de)
{
    if (nextState == Idle || ce->getState() != SimObject::Running)
        return 0;
    unsigned int count = 1;
    count += cePort->drain(de);

    DPRINTF(DMACopyEngine, "unable to drain, returning %d\n", count);
    drainEvent = de;
    return count;
}

unsigned int
CopyEngine::drain(Event *de)
{
    unsigned int count;
    count = pioPort->drain(de) + dmaPort->drain(de) + configPort->drain(de);
    for (int x = 0; x < chan.size(); x++)
        count += chan[x]->drain(de);

    if (count)
        changeState(Draining);
    else
        changeState(Drained);

    DPRINTF(DMACopyEngine, "call to CopyEngine::drain() returning %d\n", count);
    return count;
}

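/*
 * Checkpointing: the device serializes the global registers and each
 * channel in turn; a channel saves its control state, the in-flight
 * descriptor and the copy buffer so an interrupted transfer can be
 * restored and restarted after unserialization.
 */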
void
CopyEngine::serialize(std::ostream &os)
{
    PciDev::serialize(os);
    regs.serialize(os);
    for (int x = 0; x < chan.size(); x++) {
        nameOut(os, csprintf("%s.channel%d", name(), x));
        chan[x]->serialize(os);
    }
}

void
CopyEngine::unserialize(Checkpoint *cp, const std::string &section)
{
    PciDev::unserialize(cp, section);
    regs.unserialize(cp, section);
    for (int x = 0; x < chan.size(); x++)
        chan[x]->unserialize(cp, csprintf("%s.channel%d", section, x));
}

void
CopyEngine::CopyEngineChannel::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(channelId);
    SERIALIZE_SCALAR(busy);
    SERIALIZE_SCALAR(underReset);
    SERIALIZE_SCALAR(refreshNext);
    SERIALIZE_SCALAR(lastDescriptorAddr);
    SERIALIZE_SCALAR(completionDataReg);
    SERIALIZE_SCALAR(fetchAddress);
    int nextState = this->nextState;
    SERIALIZE_SCALAR(nextState);
    arrayParamOut(os, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    SERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.serialize(os);
}

void
CopyEngine::CopyEngineChannel::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(channelId);
    UNSERIALIZE_SCALAR(busy);
    UNSERIALIZE_SCALAR(underReset);
    UNSERIALIZE_SCALAR(refreshNext);
    UNSERIALIZE_SCALAR(lastDescriptorAddr);
    UNSERIALIZE_SCALAR(completionDataReg);
    UNSERIALIZE_SCALAR(fetchAddress);
    int nextState;
    UNSERIALIZE_SCALAR(nextState);
    this->nextState = (ChannelState)nextState;
    arrayParamIn(cp, section, "curDmaDesc", (uint8_t*)curDmaDesc, sizeof(DmaDesc));
    UNSERIALIZE_ARRAY(copyBuffer, ce->params()->XferCap);
    cr.unserialize(cp, section);
}

void
CopyEngine::CopyEngineChannel::restartStateMachine()
{
    switch (nextState) {
      case AddressFetch:
        fetchNextAddr(lastDescriptorAddr);
        break;
      case DescriptorFetch:
        fetchDescriptor(fetchAddress);
        break;
      case DMARead:
        readCopyBytes();
        break;
      case DMAWrite:
        writeCopyBytes();
        break;
      case CompletionWrite:
        writeCompletionStatus();
        break;
      case Idle:
        break;
      default:
        panic("Unknown state for CopyEngineChannel\n");
    }
}

void
CopyEngine::resume()
{
    SimObject::resume();
    for (int x = 0; x < chan.size(); x++)
        chan[x]->resume();
}


void
CopyEngine::CopyEngineChannel::resume()
{
    DPRINTF(DMACopyEngine, "Restarting state machine at state %d\n", nextState);
    restartStateMachine();
}

CopyEngine *
CopyEngineParams::create()
{
    return new CopyEngine(this);
}