/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Nathan Binkert
 */

#include "dev/net/sinic.hh"

#include <deque>
#include <limits>
#include <string>

#ifdef SINIC_VTOPHYS
#include "arch/vtophys.hh"
#endif

#include "base/compiler.hh"
#include "base/debug.hh"
#include "base/inet.hh"
#include "base/types.hh"
#include "config/the_isa.hh"
#include "debug/EthernetAll.hh"
#include "dev/net/etherlink.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "sim/eventq.hh"
#include "sim/stats.hh"

using namespace std;
using namespace Net;
using namespace TheISA;

namespace Sinic {

const char *RxStateStrings[] =
{
    "rxIdle",
    "rxFifoBlock",
    "rxBeginCopy",
    "rxCopy",
    "rxCopyDone"
};

const char *TxStateStrings[] =
{
    "txIdle",
    "txFifoBlock",
    "txBeginCopy",
    "txCopy",
    "txCopyDone"
};


///////////////////////////////////////////////////////////////////////
//
// Sinic PCI Device
//
Base::Base(const Params *p)
    : EtherDevBase(p), rxEnable(false), txEnable(false),
      intrDelay(p->intr_delay), intrTick(0), cpuIntrEnable(false),
      cpuPendingIntr(false), intrEvent(0), interface(NULL)
{
}

Device::Device(const Params *p)
    : Base(p), rxUnique(0), txUnique(0),
      virtualRegs(p->virtual_count < 1 ? 1 : p->virtual_count),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size),
      rxKickTick(0), txKickTick(0),
      txEvent([this]{ txEventTransmit(); }, name()),
      rxDmaEvent([this]{ rxDmaDone(); }, name()),
      txDmaEvent([this]{ txDmaDone(); }, name()),
      dmaReadDelay(p->dma_read_delay), dmaReadFactor(p->dma_read_factor),
      dmaWriteDelay(p->dma_write_delay), dmaWriteFactor(p->dma_write_factor)
{
    interface = new Interface(name() + ".int0", this);
    reset();
}

Device::~Device()
{}

void
Device::regStats()
{
    Base::regStats();

    _maxVnicDistance = 0;

    maxVnicDistance
        .name(name() + ".maxVnicDistance")
        .desc("maximum vnic distance")
        ;

    totalVnicDistance
        .name(name() + ".totalVnicDistance")
        .desc("total vnic distance")
        ;
    numVnicDistance
        .name(name() + ".numVnicDistance")
        .desc("number of vnic distance measurements")
        ;

    avgVnicDistance
        .name(name() + ".avgVnicDistance")
        .desc("average vnic distance")
        ;

    avgVnicDistance = totalVnicDistance / numVnicDistance;
}

void
Device::resetStats()
{
    Base::resetStats();

    _maxVnicDistance = 0;
}

Port &
Device::getPort(const std::string &if_name, PortID idx)
{
    if (if_name == "interface")
        return *interface;
    return EtherDevBase::getPort(if_name, idx);
}

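/**
 * Sanity check a PIO access: make sure the vnic index decoded from the
 * address refers to one of the virtual register sets we actually have.
 */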
void
Device::prepareIO(ContextID cpu, int index)
{
    int size = virtualRegs.size();
    if (index > size)
        panic("Trying to access a vnic that doesn't exist %d > %d\n",
              index, size);
}

//add stats for head of line blocking
//add stats for average fifo length
//add stats for average number of vnics busy

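/**
 * Update the shadow copies of the rx/tx status registers (RxDone/RxWait,
 * TxDone/TxWait, RxStatus) for a vnic so a subsequent register read
 * returns the current fifo state.
 */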
void
Device::prepareRead(ContextID cpu, int index)
{
    using namespace Regs;
    prepareIO(cpu, index);

    VirtualReg &vnic = virtualRegs[index];

    // update rx registers
    uint64_t rxdone = vnic.RxDone;
    rxdone = set_RxDone_Packets(rxdone, rxFifo.countPacketsAfter(rxFifoPtr));
    rxdone = set_RxDone_Empty(rxdone, rxFifo.empty());
    rxdone = set_RxDone_High(rxdone, rxFifo.size() > regs.RxFifoHigh);
    rxdone = set_RxDone_NotHigh(rxdone, rxLow);
    regs.RxData = vnic.RxData;
    regs.RxDone = rxdone;
    regs.RxWait = rxdone;

    // update tx registers
    uint64_t txdone = vnic.TxDone;
    txdone = set_TxDone_Packets(txdone, txFifo.packets());
    txdone = set_TxDone_Full(txdone, txFifo.avail() < regs.TxMaxCopy);
    txdone = set_TxDone_Low(txdone, txFifo.size() < regs.TxFifoLow);
    regs.TxData = vnic.TxData;
    regs.TxDone = txdone;
    regs.TxWait = txdone;

    int head = 0xffff;

    if (!rxFifo.empty()) {
        int vnic = rxFifo.begin()->priv;
        if (vnic != -1 && virtualRegs[vnic].rxPacketOffset > 0)
            head = vnic;
    }

    regs.RxStatus = set_RxStatus_Head(regs.RxStatus, head);
    regs.RxStatus = set_RxStatus_Busy(regs.RxStatus, rxBusyCount);
    regs.RxStatus = set_RxStatus_Mapped(regs.RxStatus, rxMappedCount);
    regs.RxStatus = set_RxStatus_Dirty(regs.RxStatus, rxDirtyCount);
}

void
Device::prepareWrite(ContextID cpu, int index)
{
    prepareIO(cpu, index);
}

/**
 * I/O read of device register
 */
Tick
Device::read(PacketPtr pkt)
{
    assert(config.command & PCI_CMD_MSE);
    assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]);

    ContextID cpu = pkt->req->contextId();
    Addr daddr = pkt->getAddr() - BARAddrs[0];
    Addr index = daddr >> Regs::VirtualShift;
    Addr raddr = daddr & Regs::VirtualMask;

    if (!regValid(raddr))
        panic("invalid register: cpu=%d vnic=%d da=%#x pa=%#x size=%d",
              cpu, index, daddr, pkt->getAddr(), pkt->getSize());

    const Regs::Info &info = regInfo(raddr);
    if (!info.read)
        panic("read %s (write only): "
              "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
              info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());

238 panic("read %s (invalid size): "
239 "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
240 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());

    prepareRead(cpu, index);

    uint64_t value M5_VAR_USED = 0;
    if (pkt->getSize() == 4) {
        uint32_t reg = regData32(raddr);
        pkt->setLE(reg);
        value = reg;
    }

    if (pkt->getSize() == 8) {
        uint64_t reg = regData64(raddr);
        pkt->setLE(reg);
        value = reg;
    }

    DPRINTF(EthernetPIO,
            "read %s: cpu=%d vnic=%d da=%#x pa=%#x size=%d val=%#x\n",
            info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize(), value);

    // reading the interrupt status register has the side effect of
    // clearing it
    if (raddr == Regs::IntrStatus)
        devIntrClear();

    return pioDelay;
}

/**
 * IPR read of device register

Fault
Device::iprRead(Addr daddr, ContextID cpu, uint64_t &result)
{
    if (!regValid(daddr))
        panic("invalid address: da=%#x", daddr);

    const Regs::Info &info = regInfo(daddr);
    if (!info.read)
        panic("reading %s (write only): cpu=%d da=%#x", info.name, cpu, daddr);

    DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x\n",
            info.name, cpu, daddr);

    prepareRead(cpu, 0);

    if (info.size == 4)
        result = regData32(daddr);

    if (info.size == 8)
        result = regData64(daddr);

    DPRINTF(EthernetPIO, "IPR read %s: cpu=%s da=%#x val=%#x\n",
            info.name, cpu, result);

    return NoFault;
}
*/
/**
 * I/O write of device register
 */
Tick
Device::write(PacketPtr pkt)
{
    assert(config.command & PCI_CMD_MSE);
    assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]);

    ContextID cpu = pkt->req->contextId();
    Addr daddr = pkt->getAddr() - BARAddrs[0];
    Addr index = daddr >> Regs::VirtualShift;
    Addr raddr = daddr & Regs::VirtualMask;

    if (!regValid(raddr))
        panic("invalid register: cpu=%d, da=%#x pa=%#x size=%d",
              cpu, daddr, pkt->getAddr(), pkt->getSize());

    const Regs::Info &info = regInfo(raddr);
    if (!info.write)
        panic("write %s (read only): "
              "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
              info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());

    if (pkt->getSize() != info.size)
        panic("write %s (invalid size): "
              "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
              info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());

    VirtualReg &vnic = virtualRegs[index];

    DPRINTF(EthernetPIO,
            "write %s vnic %d: cpu=%d val=%#x da=%#x pa=%#x size=%d\n",
            info.name, index, cpu, info.size == 4 ?
            pkt->getLE<uint32_t>() : pkt->getLE<uint64_t>(),
            daddr, pkt->getAddr(), pkt->getSize());

    prepareWrite(cpu, index);

    switch (raddr) {
      case Regs::Config:
        changeConfig(pkt->getLE<uint32_t>());
        break;

      case Regs::Command:
        command(pkt->getLE<uint32_t>());
        break;

      case Regs::IntrStatus:
        devIntrClear(regs.IntrStatus &
                     pkt->getLE<uint32_t>());
        break;

      case Regs::IntrMask:
        devIntrChangeMask(pkt->getLE<uint32_t>());
        break;

      case Regs::RxData:
        if (Regs::get_RxDone_Busy(vnic.RxDone))
            panic("receive machine busy with another request! rxState=%s",
                  RxStateStrings[rxState]);

        vnic.rxUnique = rxUnique++;
        vnic.RxDone = Regs::RxDone_Busy;
        vnic.RxData = pkt->getLE<uint64_t>();
        rxBusyCount++;

        if (Regs::get_RxData_Vaddr(pkt->getLE<uint64_t>())) {
            panic("vtophys not implemented in newmem");
#ifdef SINIC_VTOPHYS
            Addr vaddr = Regs::get_RxData_Addr(reg64);
            Addr paddr = vtophys(req->xc, vaddr);
            DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d): "
                    "vaddr=%#x, paddr=%#x\n",
                    index, vnic.rxUnique, vaddr, paddr);

            vnic.RxData = Regs::set_RxData_Addr(vnic.RxData, paddr);
#endif
        } else {
            DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d)\n",
                    index, vnic.rxUnique);
        }

        if (vnic.rxIndex == rxFifo.end()) {
            DPRINTF(EthernetPIO, "request new packet...appending to rxList\n");
            rxList.push_back(index);
        } else {
            DPRINTF(EthernetPIO, "packet exists...appending to rxBusy\n");
            rxBusy.push_back(index);
        }

        if (rxEnable && (rxState == rxIdle || rxState == rxFifoBlock)) {
            rxState = rxFifoBlock;
            rxKick();
        }
        break;

      case Regs::TxData:
        if (Regs::get_TxDone_Busy(vnic.TxDone))
            panic("transmit machine busy with another request! txState=%s",
                  TxStateStrings[txState]);

        vnic.txUnique = txUnique++;
        vnic.TxDone = Regs::TxDone_Busy;

        if (Regs::get_TxData_Vaddr(pkt->getLE<uint64_t>())) {
            panic("vtophys won't work here in newmem.\n");
#ifdef SINIC_VTOPHYS
            Addr vaddr = Regs::get_TxData_Addr(reg64);
            Addr paddr = vtophys(req->xc, vaddr);
            DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d): "
                    "vaddr=%#x, paddr=%#x\n",
                    index, vnic.txUnique, vaddr, paddr);

            vnic.TxData = Regs::set_TxData_Addr(vnic.TxData, paddr);
#endif
        } else {
            DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d)\n",
                    index, vnic.txUnique);
        }

        if (txList.empty() || txList.front() != index)
            txList.push_back(index);
        if (txEnable && txState == txIdle && txList.front() == index) {
            txState = txFifoBlock;
            txKick();
        }
        break;
    }

    return pioDelay;
}

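/**
 * Post device-level interrupts: set bits in IntrStatus, mask them with
 * IntrMask, and suppress Intr_RxHigh/Intr_TxLow unless the fifo was first
 * emptied/filled (rxEmpty/txFull).  Anything that survives is forwarded to
 * the CPU, possibly after intrDelay.
 */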
void
Device::devIntrPost(uint32_t interrupts)
{
    if ((interrupts & Regs::Intr_Res))
        panic("Cannot set a reserved interrupt");

    regs.IntrStatus |= interrupts;

    DPRINTF(EthernetIntr,
            "interrupt written to intStatus: intr=%#x status=%#x mask=%#x\n",
            interrupts, regs.IntrStatus, regs.IntrMask);

    interrupts = regs.IntrStatus & regs.IntrMask;

    // Intr_RxHigh is special, we only signal it if we've emptied the fifo
    // and then filled it above the high watermark
    if (rxEmpty)
        rxEmpty = false;
    else
        interrupts &= ~Regs::Intr_RxHigh;

    // Intr_TxLow is special, we only signal it if we've filled up the fifo
    // and then dropped below the low watermark
    if (txFull)
        txFull = false;
    else
        interrupts &= ~Regs::Intr_TxLow;

    if (interrupts) {
        Tick when = curTick();
        if ((interrupts & Regs::Intr_NoDelay) == 0)
            when += intrDelay;
        cpuIntrPost(when);
    }
}

void
Device::devIntrClear(uint32_t interrupts)
{
    if ((interrupts & Regs::Intr_Res))
        panic("Cannot clear a reserved interrupt");

    regs.IntrStatus &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from intStatus: intr=%x status=%x mask=%x\n",
            interrupts, regs.IntrStatus, regs.IntrMask);

    if (!(regs.IntrStatus & regs.IntrMask))
        cpuIntrClear();
}

void
Device::devIntrChangeMask(uint32_t newmask)
{
    if (regs.IntrMask == newmask)
        return;

    regs.IntrMask = newmask;

    DPRINTF(EthernetIntr,
            "interrupt mask changed: intStatus=%x intMask=%x masked=%x\n",
            regs.IntrStatus, regs.IntrMask, regs.IntrStatus & regs.IntrMask);

    if (regs.IntrStatus & regs.IntrMask)
        cpuIntrPost(curTick());
    else
        cpuIntrClear();
}

void
Base::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick());
    assert(intrTick >= curTick() || intrTick == 0);
    if (!cpuIntrEnable) {
        DPRINTF(EthernetIntr, "interrupts not enabled.\n",
                intrTick);
        return;
    }

    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick()) {
        intrTick = curTick();
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    if (intrEvent)
        intrEvent->squash();

    intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
                                         name(), true);
    schedule(intrEvent, intrTick);
}

void
Base::cpuInterrupt()
{
    assert(intrTick == curTick());

    // Whether or not there's a pending interrupt, we don't care about
    // it anymore
    intrEvent = 0;
    intrTick = 0;

    // Don't send an interrupt if there's already one
    if (cpuPendingIntr) {
        DPRINTF(EthernetIntr,
                "would send an interrupt now, but there's already pending\n");
    } else {
        // Send interrupt
        cpuPendingIntr = true;

        DPRINTF(EthernetIntr, "posting interrupt\n");
        intrPost();
    }
}

void
Base::cpuIntrClear()
{
    if (!cpuPendingIntr)
        return;

    if (intrEvent) {
        intrEvent->squash();
        intrEvent = 0;
    }

    intrTick = 0;

    cpuPendingIntr = false;

    DPRINTF(EthernetIntr, "clearing cchip interrupt\n");
    intrClear();
}

bool
Base::cpuIntrPending() const
{ return cpuPendingIntr; }

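/**
 * Handle a write to the Config register: apply interrupt enable changes
 * and kick the tx/rx state machines when they become enabled.
 */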
void
Device::changeConfig(uint32_t newconf)
{
    uint32_t changed = regs.Config ^ newconf;
    if (!changed)
        return;

    regs.Config = newconf;

    if ((changed & Regs::Config_IntEn)) {
        cpuIntrEnable = regs.Config & Regs::Config_IntEn;
        if (cpuIntrEnable) {
            if (regs.IntrStatus & regs.IntrMask)
                cpuIntrPost(curTick());
        } else {
            cpuIntrClear();
        }
    }

    if ((changed & Regs::Config_TxEn)) {
        txEnable = regs.Config & Regs::Config_TxEn;
        if (txEnable)
            txKick();
    }

    if ((changed & Regs::Config_RxEn)) {
        rxEnable = regs.Config & Regs::Config_RxEn;
        if (rxEnable)
            rxKick();
    }
}

void
Device::command(uint32_t command)
{
    if (command & Regs::Command_Intr)
        devIntrPost(Regs::Intr_Soft);

    if (command & Regs::Command_Reset)
        reset();
}

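/**
 * Reset the device: rebuild the Config register from the simulation
 * parameters, reload the fifo/copy thresholds, and clear all fifos, lists,
 * state machines, and per-vnic state.
 */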
void
Device::reset()
{
    using namespace Regs;

    memset(&regs, 0, sizeof(regs));

    regs.Config = 0;
    if (params()->rx_thread)
        regs.Config |= Config_RxThread;
    if (params()->tx_thread)
        regs.Config |= Config_TxThread;
    if (params()->rss)
        regs.Config |= Config_RSS;
    if (params()->zero_copy)
        regs.Config |= Config_ZeroCopy;
    if (params()->delay_copy)
        regs.Config |= Config_DelayCopy;
    if (params()->virtual_addr)
        regs.Config |= Config_Vaddr;

    if (params()->delay_copy && params()->zero_copy)
        panic("Can't delay copy and zero copy");

    regs.IntrMask = Intr_Soft | Intr_RxHigh | Intr_RxPacket | Intr_TxLow;
    regs.RxMaxCopy = params()->rx_max_copy;
    regs.TxMaxCopy = params()->tx_max_copy;
    regs.ZeroCopySize = params()->zero_copy_size;
    regs.ZeroCopyMark = params()->zero_copy_threshold;
    regs.VirtualCount = params()->virtual_count;
    regs.RxMaxIntr = params()->rx_max_intr;
    regs.RxFifoSize = params()->rx_fifo_size;
    regs.TxFifoSize = params()->tx_fifo_size;
    regs.RxFifoLow = params()->rx_fifo_low_mark;
    regs.TxFifoLow = params()->tx_fifo_threshold;
    regs.RxFifoHigh = params()->rx_fifo_threshold;
    regs.TxFifoHigh = params()->tx_fifo_high_mark;
    regs.HwAddr = params()->hardware_address;

    if (regs.RxMaxCopy < regs.ZeroCopyMark)
        panic("Must be able to copy at least as many bytes as the threshold");

    if (regs.ZeroCopySize >= regs.ZeroCopyMark)
        panic("The number of bytes to copy must be less than the threshold");

    rxList.clear();
    rxBusy.clear();
    rxActive = -1;
    txList.clear();
    rxBusyCount = 0;
    rxDirtyCount = 0;
    rxMappedCount = 0;

    rxState = rxIdle;
    txState = txIdle;

    rxFifo.clear();
    rxFifoPtr = rxFifo.end();
    txFifo.clear();
    rxEmpty = false;
    rxLow = true;
    txFull = false;

    int size = virtualRegs.size();
    virtualRegs.clear();
    virtualRegs.resize(size);
    for (int i = 0; i < size; ++i)
        virtualRegs[i].rxIndex = rxFifo.end();
}

void
Device::rxDmaDone()
{
    assert(rxState == rxCopy);
    rxState = rxCopyDone;
    DPRINTF(EthernetDMA, "end rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetData, rxDmaData, rxDmaLen);

    // If the transmit state machine has a pending DMA, let it go first
    if (txState == txBeginCopy)
        txKick();

    rxKick();
}

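/**
 * Advance the receive state machine (rxIdle -> rxFifoBlock -> rxBeginCopy
 * -> rxCopy -> rxCopyDone): pick the next vnic to service, DMA packet data
 * from the rxFifo to the address programmed in its RxData register, and
 * post completion status and interrupts.
 */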
void
Device::rxKick()
{
    VirtualReg *vnic = NULL;

    DPRINTF(EthernetSM, "rxKick: rxState=%s (rxFifo.size=%d)\n",
            RxStateStrings[rxState], rxFifo.size());

    if (rxKickTick > curTick()) {
        DPRINTF(EthernetSM, "rxKick: exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    rxFifo.check();
    if (rxState == rxIdle)
        goto exit;

    if (rxActive == -1) {
        if (rxState != rxFifoBlock)
            panic("no active vnic while in state %s", RxStateStrings[rxState]);

        DPRINTF(EthernetSM, "processing rxState=%s\n",
                RxStateStrings[rxState]);
    } else {
        vnic = &virtualRegs[rxActive];
        DPRINTF(EthernetSM,
                "processing rxState=%s for vnic %d (rxunique %d)\n",
                RxStateStrings[rxState], rxActive, vnic->rxUnique);
    }

    switch (rxState) {
      case rxFifoBlock:
        if (DTRACE(EthernetSM)) {
            PacketFifo::iterator end = rxFifo.end();
            int size = virtualRegs.size();
            for (int i = 0; i < size; ++i) {
                VirtualReg *vn = &virtualRegs[i];
                bool busy = Regs::get_RxDone_Busy(vn->RxDone);
                if (vn->rxIndex != end) {
#ifndef NDEBUG
                    bool dirty = vn->rxPacketOffset > 0;
                    const char *status;

                    if (busy && dirty)
                        status = "busy,dirty";
                    else if (busy)
                        status = "busy";
                    else if (dirty)
                        status = "dirty";
                    else
                        status = "mapped";

                    DPRINTF(EthernetSM,
                            "vnic %d %s (rxunique %d), packet %d, slack %d\n",
                            i, status, vn->rxUnique,
                            rxFifo.countPacketsBefore(vn->rxIndex),
                            vn->rxIndex->slack);
#endif
                } else if (busy) {
                    DPRINTF(EthernetSM, "vnic %d unmapped (rxunique %d)\n",
                            i, vn->rxUnique);
                }
            }
        }

        if (!rxBusy.empty()) {
            rxActive = rxBusy.front();
            rxBusy.pop_front();
            vnic = &virtualRegs[rxActive];

            if (vnic->rxIndex == rxFifo.end())
                panic("continuing vnic without packet\n");

            DPRINTF(EthernetSM,
                    "continue processing for vnic %d (rxunique %d)\n",
                    rxActive, vnic->rxUnique);

            rxState = rxBeginCopy;

            int vnic_distance = rxFifo.countPacketsBefore(vnic->rxIndex);
            totalVnicDistance += vnic_distance;
            numVnicDistance += 1;
            if (vnic_distance > _maxVnicDistance) {
                maxVnicDistance = vnic_distance;
                _maxVnicDistance = vnic_distance;
            }

            break;
        }

        if (rxFifoPtr == rxFifo.end()) {
            DPRINTF(EthernetSM, "receive waiting for data. Nothing to do.\n");
            goto exit;
        }

        if (rxList.empty())
            panic("Not idle, but nothing to do!");

        assert(!rxFifo.empty());

        rxActive = rxList.front();
        rxList.pop_front();
        vnic = &virtualRegs[rxActive];

        DPRINTF(EthernetSM,
                "processing new packet for vnic %d (rxunique %d)\n",
                rxActive, vnic->rxUnique);

        // Grab a new packet from the fifo.
        vnic->rxIndex = rxFifoPtr++;
        vnic->rxIndex->priv = rxActive;
        vnic->rxPacketOffset = 0;
        vnic->rxPacketBytes = vnic->rxIndex->packet->length;
        assert(vnic->rxPacketBytes);
        rxMappedCount++;

        vnic->rxDoneData = 0;
        /* scope for variables */ {
            IpPtr ip(vnic->rxIndex->packet);
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                vnic->rxDoneData |= Regs::RxDone_IpPacket;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    vnic->rxDoneData |= Regs::RxDone_IpError;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                    vnic->rxDoneData |= Regs::RxDone_TcpPacket;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        vnic->rxDoneData |= Regs::RxDone_TcpError;
                    }
                } else if (udp) {
                    vnic->rxDoneData |= Regs::RxDone_UdpPacket;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        vnic->rxDoneData |= Regs::RxDone_UdpError;
                    }
                }
            }
        }
        rxState = rxBeginCopy;
        break;

      case rxBeginCopy:
        if (dmaPending() || drainState() != DrainState::Running)
            goto exit;

        rxDmaAddr = pciToDma(Regs::get_RxData_Addr(vnic->RxData));
        rxDmaLen = min<unsigned>(Regs::get_RxData_Len(vnic->RxData),
                                 vnic->rxPacketBytes);

        /*
         * if we're doing zero/delay copy and we're below the fifo
         * threshold, see if we should try to do the zero/defer copy
         */
        if ((Regs::get_Config_ZeroCopy(regs.Config) ||
             Regs::get_Config_DelayCopy(regs.Config)) &&
            !Regs::get_RxData_NoDelay(vnic->RxData) && rxLow) {
            if (rxDmaLen > regs.ZeroCopyMark)
                rxDmaLen = regs.ZeroCopySize;
        }
        rxDmaData = vnic->rxIndex->packet->data + vnic->rxPacketOffset;
        rxState = rxCopy;
        if (rxDmaAddr == 1LL) {
            rxState = rxCopyDone;
            break;
        }

        dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaEvent, rxDmaData);
        break;

      case rxCopy:
        DPRINTF(EthernetSM, "receive machine still copying\n");
        goto exit;

      case rxCopyDone:
        vnic->RxDone = vnic->rxDoneData;
        vnic->RxDone |= Regs::RxDone_Complete;
        rxBusyCount--;

        if (vnic->rxPacketBytes == rxDmaLen) {
            if (vnic->rxPacketOffset)
                rxDirtyCount--;

            // Packet is complete. Indicate how many bytes were copied
            vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone, rxDmaLen);

            DPRINTF(EthernetSM,
                    "rxKick: packet complete on vnic %d (rxunique %d)\n",
                    rxActive, vnic->rxUnique);
            rxFifo.remove(vnic->rxIndex);
            vnic->rxIndex = rxFifo.end();
            rxMappedCount--;
        } else {
            if (!vnic->rxPacketOffset)
                rxDirtyCount++;

            vnic->rxPacketBytes -= rxDmaLen;
            vnic->rxPacketOffset += rxDmaLen;
            vnic->RxDone |= Regs::RxDone_More;
            vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone,
                                                    vnic->rxPacketBytes);
            DPRINTF(EthernetSM,
                    "rxKick: packet not complete on vnic %d (rxunique %d): "
                    "%d bytes left\n",
                    rxActive, vnic->rxUnique, vnic->rxPacketBytes);
        }

        rxActive = -1;
        rxState = rxBusy.empty() && rxList.empty() ? rxIdle : rxFifoBlock;

        if (rxFifo.empty()) {
            devIntrPost(Regs::Intr_RxEmpty);
            rxEmpty = true;
        }

        if (rxFifo.size() < regs.RxFifoLow)
            rxLow = true;

        if (rxFifo.size() > regs.RxFifoHigh)
            rxLow = false;

        devIntrPost(Regs::Intr_RxDMA);
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            RxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            RxStateStrings[rxState]);
}

void
Device::txDmaDone()
{
    assert(txState == txCopy);
    txState = txCopyDone;
    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetData, txDmaData, txDmaLen);

    // If the receive state machine has a pending DMA, let it go first
    if (rxState == rxBeginCopy)
        rxKick();

    txKick();
}

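/**
 * Try to send the packet at the head of txFifo out over the interface;
 * on success, update statistics and post Intr_TxPacket (plus Intr_TxLow
 * if the fifo has drained below its threshold).
 */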
void
Device::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    uint32_t interrupts;
    EthPacketPtr packet = txFifo.front();
    if (!interface->sendPacket(packet)) {
        DPRINTF(Ethernet, "Packet Transmit: failed txFifo available %d\n",
                txFifo.avail());
        return;
    }

    txFifo.pop();
#if TRACING_ON
    if (DTRACE(Ethernet)) {
        IpPtr ip(packet);
        if (ip) {
            DPRINTF(Ethernet, "ID is %d\n", ip->id());
            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(Ethernet,
                        "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                        tcp->sport(), tcp->dport(), tcp->seq(),
                        tcp->ack());
            }
        }
    }
#endif

    DDUMP(EthernetData, packet->data, packet->length);
    txBytes += packet->length;
    txPackets++;

    DPRINTF(Ethernet, "Packet Transmit: successful txFifo Available %d\n",
            txFifo.avail());

    interrupts = Regs::Intr_TxPacket;
    if (txFifo.size() < regs.TxFifoLow)
        interrupts |= Regs::Intr_TxLow;
    devIntrPost(interrupts);
}

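/**
 * Advance the transmit state machine (txIdle -> txFifoBlock -> txBeginCopy
 * -> txCopy -> txCopyDone): DMA data from the address in the head vnic's
 * TxData register into a packet, push it onto txFifo, and transmit.
 */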
void
Device::txKick()
{
    VirtualReg *vnic;
    DPRINTF(EthernetSM, "txKick: txState=%s (txFifo.size=%d)\n",
            TxStateStrings[txState], txFifo.size());

    if (txKickTick > curTick()) {
        DPRINTF(EthernetSM, "txKick: exiting, can't run till %d\n",
                txKickTick);
        return;
    }

  next:
    if (txState == txIdle)
        goto exit;

    assert(!txList.empty());
    vnic = &virtualRegs[txList.front()];

    switch (txState) {
      case txFifoBlock:
        assert(Regs::get_TxDone_Busy(vnic->TxDone));
        if (!txPacket) {
            // Grab a new packet from the fifo.
            txPacket = make_shared<EthPacketData>(16384);
            txPacketOffset = 0;
        }

        if (txFifo.avail() - txPacket->length <
            Regs::get_TxData_Len(vnic->TxData)) {
            DPRINTF(EthernetSM, "transmit fifo full. Nothing to do.\n");
            goto exit;
        }

        txState = txBeginCopy;
        break;

      case txBeginCopy:
        if (dmaPending() || drainState() != DrainState::Running)
            goto exit;

        txDmaAddr = pciToDma(Regs::get_TxData_Addr(vnic->TxData));
        txDmaLen = Regs::get_TxData_Len(vnic->TxData);
        txDmaData = txPacket->data + txPacketOffset;
        txState = txCopy;

        dmaRead(txDmaAddr, txDmaLen, &txDmaEvent, txDmaData);
        break;

      case txCopy:
        DPRINTF(EthernetSM, "transmit machine still copying\n");
        goto exit;

      case txCopyDone:
        vnic->TxDone = txDmaLen | Regs::TxDone_Complete;
        txPacket->simLength += txDmaLen;
        txPacket->length += txDmaLen;
        if ((vnic->TxData & Regs::TxData_More)) {
            txPacketOffset += txDmaLen;
            txState = txIdle;
            devIntrPost(Regs::Intr_TxDMA);
            break;
        }

        assert(txPacket->length <= txFifo.avail());
        if ((vnic->TxData & Regs::TxData_Checksum)) {
            IpPtr ip(txPacket);
            if (ip) {
                TcpPtr tcp(ip);
                if (tcp) {
                    tcp->sum(0);
                    tcp->sum(cksum(tcp));
                    txTcpChecksums++;
                }

                UdpPtr udp(ip);
                if (udp) {
                    udp->sum(0);
                    udp->sum(cksum(udp));
                    txUdpChecksums++;
                }

                ip->sum(0);
                ip->sum(cksum(ip));
                txIpChecksums++;
            }
        }

        txFifo.push(txPacket);
        if (txFifo.avail() < regs.TxMaxCopy) {
            devIntrPost(Regs::Intr_TxFull);
            txFull = true;
        }
        txPacket = 0;
        transmit();
        txList.pop_front();
        txState = txList.empty() ? txIdle : txFifoBlock;
        devIntrPost(Regs::Intr_TxDMA);
        break;

      default:
        panic("Invalid txState!");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            TxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            TxStateStrings[txState]);
}

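/**
 * Called when the interface has finished sending a packet; if txFifo still
 * has data, schedule another transmit.
 */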
void
Device::transferDone()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
        return;
    }

    DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");

    reschedule(txEvent, clockEdge(Cycles(1)), true);
}

bool
Device::rxFilter(const EthPacketPtr &packet)
{
    if (!Regs::get_Config_Filter(regs.Config))
        return false;

    panic("receive filter not implemented\n");
    bool drop = true;

#if 0
    string type;

    EthHdr *eth = packet->eth();
    if (eth->unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && params->eaddr == eth.dst())
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (eth->broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (eth->multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }
#endif
    return drop;
}

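/**
 * Accept a packet from the wire: apply the receive filter, push the packet
 * into rxFifo, post Intr_RxPacket (and Intr_RxHigh above the high
 * watermark), and kick the receive state machine.  Returns false only if
 * the packet does not fit in the fifo, so the link can retry later.
 */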
bool
Device::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifo Available is %d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.size() >= regs.RxFifoHigh)
        devIntrPost(Regs::Intr_RxHigh);

    if (!rxFifo.push(packet)) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        return false;
    }

    // If we were at the last element, back up one to go to the new
    // last element of the list.
    if (rxFifoPtr == rxFifo.end())
        --rxFifoPtr;

    devIntrPost(Regs::Intr_RxPacket);
    rxKick();
    return true;
}

void
Device::drainResume()
{
    Drainable::drainResume();

    // During drain we could have left the state machines in a waiting state
    // and they wouldn't get out until some other event occurred to kick them.
    // This way they'll get out immediately.
    txKick();
    rxKick();
}

//=====================================================================
//
//
void
Base::serialize(CheckpointOut &cp) const
{
    // Serialize the PciDevice base class
    PciDevice::serialize(cp);

    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(cpuIntrEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);
}

void
Base::unserialize(CheckpointIn &cp)
{
    // Unserialize the PciDevice base class
    PciDevice::unserialize(cp);

    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(cpuIntrEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
                                             name(), true);
        schedule(intrEvent, intrEventTick);
    }
}

void
Device::serialize(CheckpointOut &cp) const
{
    int count;

    // Serialize the PciDevice base class
    Base::serialize(cp);

    if (rxState == rxCopy)
        panic("can't serialize with an in flight dma request rxState=%s",
              RxStateStrings[rxState]);

    if (txState == txCopy)
        panic("can't serialize with an in flight dma request txState=%s",
              TxStateStrings[txState]);

    /*
     * Serialize the device registers that could be modified by the OS.
     */
    SERIALIZE_SCALAR(regs.Config);
    SERIALIZE_SCALAR(regs.IntrStatus);
    SERIALIZE_SCALAR(regs.IntrMask);
    SERIALIZE_SCALAR(regs.RxData);
    SERIALIZE_SCALAR(regs.TxData);

    /*
     * Serialize the virtual nic state
     */
    int virtualRegsSize = virtualRegs.size();
    SERIALIZE_SCALAR(virtualRegsSize);
    for (int i = 0; i < virtualRegsSize; ++i) {
        const VirtualReg *vnic = &virtualRegs[i];

        std::string reg = csprintf("vnic%d", i);
        paramOut(cp, reg + ".RxData", vnic->RxData);
        paramOut(cp, reg + ".RxDone", vnic->RxDone);
        paramOut(cp, reg + ".TxData", vnic->TxData);
        paramOut(cp, reg + ".TxDone", vnic->TxDone);

        bool rxPacketExists = vnic->rxIndex != rxFifo.end();
        paramOut(cp, reg + ".rxPacketExists", rxPacketExists);
        if (rxPacketExists) {
            int rxPacket = 0;
            auto i = rxFifo.begin();
            while (i != vnic->rxIndex) {
                assert(i != rxFifo.end());
                ++i;
                ++rxPacket;
            }

            paramOut(cp, reg + ".rxPacket", rxPacket);
            paramOut(cp, reg + ".rxPacketOffset", vnic->rxPacketOffset);
            paramOut(cp, reg + ".rxPacketBytes", vnic->rxPacketBytes);
        }
        paramOut(cp, reg + ".rxDoneData", vnic->rxDoneData);
    }

    int rxFifoPtr = -1;
    if (this->rxFifoPtr != rxFifo.end())
        rxFifoPtr = rxFifo.countPacketsBefore(this->rxFifoPtr);
    SERIALIZE_SCALAR(rxFifoPtr);

    SERIALIZE_SCALAR(rxActive);
    SERIALIZE_SCALAR(rxBusyCount);
    SERIALIZE_SCALAR(rxDirtyCount);
    SERIALIZE_SCALAR(rxMappedCount);

    VirtualList::const_iterator i, end;
    for (count = 0, i = rxList.begin(), end = rxList.end(); i != end; ++i)
        paramOut(cp, csprintf("rxList%d", count++), *i);
    int rxListSize = count;
    SERIALIZE_SCALAR(rxListSize);

    for (count = 0, i = rxBusy.begin(), end = rxBusy.end(); i != end; ++i)
        paramOut(cp, csprintf("rxBusy%d", count++), *i);
    int rxBusySize = count;
    SERIALIZE_SCALAR(rxBusySize);

    for (count = 0, i = txList.begin(), end = txList.end(); i != end; ++i)
        paramOut(cp, csprintf("txList%d", count++), *i);
    int txListSize = count;
    SERIALIZE_SCALAR(txListSize);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEmpty);
    SERIALIZE_SCALAR(rxLow);
    rxFifo.serialize("rxFifo", cp);

    /*
     * Serialize tx state machine
     */
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txFull);
    txFifo.serialize("txFifo", cp);
    bool txPacketExists = txPacket != nullptr;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->serialize("txPacket", cp);
        SERIALIZE_SCALAR(txPacketOffset);
        SERIALIZE_SCALAR(txPacketBytes);
    }

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
    SERIALIZE_SCALAR(transmitTick);
}

void
Device::unserialize(CheckpointIn &cp)
{
    // Unserialize the PciDevice base class
    Base::unserialize(cp);

    /*
     * Unserialize the device registers that may have been written by the OS.
     */
    UNSERIALIZE_SCALAR(regs.Config);
    UNSERIALIZE_SCALAR(regs.IntrStatus);
    UNSERIALIZE_SCALAR(regs.IntrMask);
    UNSERIALIZE_SCALAR(regs.RxData);
    UNSERIALIZE_SCALAR(regs.TxData);

    UNSERIALIZE_SCALAR(rxActive);
    UNSERIALIZE_SCALAR(rxBusyCount);
    UNSERIALIZE_SCALAR(rxDirtyCount);
    UNSERIALIZE_SCALAR(rxMappedCount);

    int rxListSize;
    UNSERIALIZE_SCALAR(rxListSize);
    rxList.clear();
    for (int i = 0; i < rxListSize; ++i) {
        int value;
        paramIn(cp, csprintf("rxList%d", i), value);
        rxList.push_back(value);
    }

    int rxBusySize;
    UNSERIALIZE_SCALAR(rxBusySize);
    rxBusy.clear();
    for (int i = 0; i < rxBusySize; ++i) {
        int value;
        paramIn(cp, csprintf("rxBusy%d", i), value);
        rxBusy.push_back(value);
    }

    int txListSize;
    UNSERIALIZE_SCALAR(txListSize);
    txList.clear();
    for (int i = 0; i < txListSize; ++i) {
        int value;
        paramIn(cp, csprintf("txList%d", i), value);
        txList.push_back(value);
    }

    /*
     * Unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    UNSERIALIZE_SCALAR(rxEmpty);
    UNSERIALIZE_SCALAR(rxLow);
    this->rxState = (RxState) rxState;
    rxFifo.unserialize("rxFifo", cp);

    int rxFifoPtr;
    UNSERIALIZE_SCALAR(rxFifoPtr);
    if (rxFifoPtr >= 0) {
        this->rxFifoPtr = rxFifo.begin();
        for (int i = 0; i < rxFifoPtr; ++i)
            ++this->rxFifoPtr;
    } else {
        this->rxFifoPtr = rxFifo.end();
    }

    /*
     * Unserialize tx state machine
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    UNSERIALIZE_SCALAR(txFull);
    this->txState = (TxState) txState;
    txFifo.unserialize("txFifo", cp);
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    txPacket = 0;
    if (txPacketExists) {
        txPacket = make_shared<EthPacketData>(16384);
        txPacket->unserialize("txPacket", cp);
        UNSERIALIZE_SCALAR(txPacketOffset);
        UNSERIALIZE_SCALAR(txPacketBytes);
    }

    /*
     * unserialize the virtual nic registers/state
     *
     * this must be done after the unserialization of the rxFifo
     * because the packet iterators depend on the fifo being populated
     */
    int virtualRegsSize;
    UNSERIALIZE_SCALAR(virtualRegsSize);
    virtualRegs.clear();
    virtualRegs.resize(virtualRegsSize);
    for (int i = 0; i < virtualRegsSize; ++i) {
        VirtualReg *vnic = &virtualRegs[i];
        std::string reg = csprintf("vnic%d", i);

        paramIn(cp, reg + ".RxData", vnic->RxData);
        paramIn(cp, reg + ".RxDone", vnic->RxDone);
        paramIn(cp, reg + ".TxData", vnic->TxData);
        paramIn(cp, reg + ".TxDone", vnic->TxDone);

        vnic->rxUnique = rxUnique++;
        vnic->txUnique = txUnique++;

        bool rxPacketExists;
        paramIn(cp, reg + ".rxPacketExists", rxPacketExists);
        if (rxPacketExists) {
            int rxPacket;
            paramIn(cp, reg + ".rxPacket", rxPacket);
            vnic->rxIndex = rxFifo.begin();
            while (rxPacket--)
                ++vnic->rxIndex;

            paramIn(cp, reg + ".rxPacketOffset",
                    vnic->rxPacketOffset);
            paramIn(cp, reg + ".rxPacketBytes", vnic->rxPacketBytes);
        } else {
            vnic->rxIndex = rxFifo.end();
        }
        paramIn(cp, reg + ".rxDoneData", vnic->rxDoneData);
    }

    /*
     * If there's a pending transmit, reschedule it now
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        schedule(txEvent, curTick() + transmitTick);

    pioPort.sendRangeChange();
}

} // namespace Sinic

Sinic::Device *
SinicParams::create()
{
    return new Sinic::Device(this);
}