mem-cache: Add multiple eviction stats
[gem5.git] / src / dev / net / sinic.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 */
30
31 #include "dev/net/sinic.hh"
32
33 #include <deque>
34 #include <limits>
35 #include <string>
36
37 #include "base/compiler.hh"
38 #include "base/debug.hh"
39 #include "base/inet.hh"
40 #include "base/types.hh"
41 #include "debug/EthernetAll.hh"
42 #include "dev/net/etherlink.hh"
43 #include "mem/packet.hh"
44 #include "mem/packet_access.hh"
45 #include "sim/eventq.hh"
46 #include "sim/stats.hh"
47
48 using namespace std;
49 using namespace Net;
50
51 namespace Sinic {
52
// Printable names for the receive state machine, indexed by the
// RxState enum values; used in DPRINTF and panic messages.
const char *RxStateStrings[] = {
    "rxIdle",
    "rxFifoBlock",
    "rxBeginCopy",
    "rxCopy",
    "rxCopyDone"
};
61
// Printable names for the transmit state machine, indexed by the
// TxState enum values; used in DPRINTF and panic messages.
const char *TxStateStrings[] = {
    "txIdle",
    "txFifoBlock",
    "txBeginCopy",
    "txCopy",
    "txCopyDone"
};
70
71
72 ///////////////////////////////////////////////////////////////////////
73 //
74 // Sinic PCI Device
75 //
76 Base::Base(const Params *p)
77 : EtherDevBase(p), rxEnable(false), txEnable(false),
78 intrDelay(p->intr_delay), intrTick(0), cpuIntrEnable(false),
79 cpuPendingIntr(false), intrEvent(0), interface(NULL)
80 {
81 }
82
/**
 * Construct the Sinic device proper: size the virtual register file
 * (clamped to at least one vnic), size the rx/tx fifos, and wire up
 * the transmit and DMA completion events.  Also creates the Ethernet
 * interface port and resets all device registers to power-on values.
 */
Device::Device(const Params *p)
    : Base(p), rxUnique(0), txUnique(0),
      virtualRegs(p->virtual_count < 1 ? 1 : p->virtual_count),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size),
      rxKickTick(0), txKickTick(0),
      txEvent([this]{ txEventTransmit(); }, name()),
      rxDmaEvent([this]{ rxDmaDone(); }, name()),
      txDmaEvent([this]{ txDmaDone(); }, name()),
      dmaReadDelay(p->dma_read_delay), dmaReadFactor(p->dma_read_factor),
      dmaWriteDelay(p->dma_write_delay), dmaWriteFactor(p->dma_write_factor)
{
    // The interface is allocated here and owned by this device.
    interface = new Interface(name() + ".int0", this);
    reset();

}
98
99 Device::~Device()
100 {}
101
/**
 * Register the vnic-distance statistics.  "Distance" is how many
 * packets sit ahead of a vnic's packet in the rx fifo when processing
 * resumes for it (see rxKick); it measures head-of-line blocking
 * between vnics.
 */
void
Device::regStats()
{
    Base::regStats();

    // Plain member mirror of the max, kept outside the stats
    // framework so rxKick can compare against it cheaply.
    _maxVnicDistance = 0;

    maxVnicDistance
        .name(name() + ".maxVnicDistance")
        .desc("maximum vnic distance")
        ;

    totalVnicDistance
        .name(name() + ".totalVnicDistance")
        .desc("total vnic distance")
        ;
    numVnicDistance
        .name(name() + ".numVnicDistance")
        .desc("number of vnic distance measurements")
        ;

    avgVnicDistance
        .name(name() + ".avgVnicDistance")
        .desc("average vnic distance")
        ;

    // Formula stat: evaluated from the two counters at dump time.
    avgVnicDistance = totalVnicDistance / numVnicDistance;
}
130
/**
 * Reset statistics; also clears the locally tracked running maximum
 * that shadows the maxVnicDistance stat.
 */
void
Device::resetStats()
{
    Base::resetStats();

    _maxVnicDistance = 0;
}
138
139 Port &
140 Device::getPort(const std::string &if_name, PortID idx)
141 {
142 if (if_name == "interface")
143 return *interface;
144 return EtherDevBase::getPort(if_name, idx);
145 }
146
147
148 void
149 Device::prepareIO(ContextID cpu, int index)
150 {
151 int size = virtualRegs.size();
152 if (index > size)
153 panic("Trying to access a vnic that doesn't exist %d > %d\n",
154 index, size);
155 }
156
157 //add stats for head of line blocking
158 //add stats for average fifo length
159 //add stats for average number of vnics busy
160
161 void
162 Device::prepareRead(ContextID cpu, int index)
163 {
164 using namespace Regs;
165 prepareIO(cpu, index);
166
167 VirtualReg &vnic = virtualRegs[index];
168
169 // update rx registers
170 uint64_t rxdone = vnic.RxDone;
171 rxdone = set_RxDone_Packets(rxdone, rxFifo.countPacketsAfter(rxFifoPtr));
172 rxdone = set_RxDone_Empty(rxdone, rxFifo.empty());
173 rxdone = set_RxDone_High(rxdone, rxFifo.size() > regs.RxFifoHigh);
174 rxdone = set_RxDone_NotHigh(rxdone, rxLow);
175 regs.RxData = vnic.RxData;
176 regs.RxDone = rxdone;
177 regs.RxWait = rxdone;
178
179 // update tx regsiters
180 uint64_t txdone = vnic.TxDone;
181 txdone = set_TxDone_Packets(txdone, txFifo.packets());
182 txdone = set_TxDone_Full(txdone, txFifo.avail() < regs.TxMaxCopy);
183 txdone = set_TxDone_Low(txdone, txFifo.size() < regs.TxFifoLow);
184 regs.TxData = vnic.TxData;
185 regs.TxDone = txdone;
186 regs.TxWait = txdone;
187
188 int head = 0xffff;
189
190 if (!rxFifo.empty()) {
191 int vnic = rxFifo.begin()->priv;
192 if (vnic != -1 && virtualRegs[vnic].rxPacketOffset > 0)
193 head = vnic;
194 }
195
196 regs.RxStatus = set_RxStatus_Head(regs.RxStatus, head);
197 regs.RxStatus = set_RxStatus_Busy(regs.RxStatus, rxBusyCount);
198 regs.RxStatus = set_RxStatus_Mapped(regs.RxStatus, rxMappedCount);
199 regs.RxStatus = set_RxStatus_Dirty(regs.RxStatus, rxDirtyCount);
200 }
201
/**
 * Hook called before an I/O write is handled; currently only bounds
 * checks the vnic index via prepareIO.
 */
void
Device::prepareWrite(ContextID cpu, int index)
{
    prepareIO(cpu, index);
}
207
208 /**
209 * I/O read of device register
210 */
211 Tick
212 Device::read(PacketPtr pkt)
213 {
214 assert(config.command & PCI_CMD_MSE);
215 assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]);
216
217 ContextID cpu = pkt->req->contextId();
218 Addr daddr = pkt->getAddr() - BARAddrs[0];
219 Addr index = daddr >> Regs::VirtualShift;
220 Addr raddr = daddr & Regs::VirtualMask;
221
222 if (!regValid(raddr))
223 panic("invalid register: cpu=%d vnic=%d da=%#x pa=%#x size=%d",
224 cpu, index, daddr, pkt->getAddr(), pkt->getSize());
225
226 const Regs::Info &info = regInfo(raddr);
227 if (!info.read)
228 panic("read %s (write only): "
229 "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
230 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());
231
232 panic("read %s (invalid size): "
233 "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
234 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());
235
236 prepareRead(cpu, index);
237
238 uint64_t value M5_VAR_USED = 0;
239 if (pkt->getSize() == 4) {
240 uint32_t reg = regData32(raddr);
241 pkt->setLE(reg);
242 value = reg;
243 }
244
245 if (pkt->getSize() == 8) {
246 uint64_t reg = regData64(raddr);
247 pkt->setLE(reg);
248 value = reg;
249 }
250
251 DPRINTF(EthernetPIO,
252 "read %s: cpu=%d vnic=%d da=%#x pa=%#x size=%d val=%#x\n",
253 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize(), value);
254
255 // reading the interrupt status register has the side effect of
256 // clearing it
257 if (raddr == Regs::IntrStatus)
258 devIntrClear();
259
260 return pioDelay;
261 }
262
263 /**
264 * IPR read of device register
265
266 Fault
267 Device::iprRead(Addr daddr, ContextID cpu, uint64_t &result)
268 {
269 if (!regValid(daddr))
270 panic("invalid address: da=%#x", daddr);
271
272 const Regs::Info &info = regInfo(daddr);
273 if (!info.read)
274 panic("reading %s (write only): cpu=%d da=%#x", info.name, cpu, daddr);
275
276 DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x\n",
277 info.name, cpu, daddr);
278
279 prepareRead(cpu, 0);
280
281 if (info.size == 4)
282 result = regData32(daddr);
283
284 if (info.size == 8)
285 result = regData64(daddr);
286
287 DPRINTF(EthernetPIO, "IPR read %s: cpu=%s da=%#x val=%#x\n",
288 info.name, cpu, result);
289
290 return NoFault;
291 }
292 */
/**
 * I/O write of device register.  Decodes the vnic index and register
 * offset, validates the access, then dispatches: Config, Command,
 * IntrStatus and IntrMask affect the whole device, while RxData and
 * TxData post per-vnic requests and kick the rx/tx state machines.
 *
 * @return the PIO latency for this access
 */
Tick
Device::write(PacketPtr pkt)
{
    assert(config.command & PCI_CMD_MSE);
    assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]);

    ContextID cpu = pkt->req->contextId();
    Addr daddr = pkt->getAddr() - BARAddrs[0];
    Addr index = daddr >> Regs::VirtualShift;   // which vnic
    Addr raddr = daddr & Regs::VirtualMask;     // which register within it

    if (!regValid(raddr))
        panic("invalid register: cpu=%d, da=%#x pa=%#x size=%d",
              cpu, daddr, pkt->getAddr(), pkt->getSize());

    const Regs::Info &info = regInfo(raddr);
    if (!info.write)
        panic("write %s (read only): "
              "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
              info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());

    // Writes must exactly match the register's declared width.
    if (pkt->getSize() != info.size)
        panic("write %s (invalid size): "
              "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
              info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());

    VirtualReg &vnic = virtualRegs[index];

    DPRINTF(EthernetPIO,
            "write %s vnic %d: cpu=%d val=%#x da=%#x pa=%#x size=%d\n",
            info.name, index, cpu, info.size == 4 ?
            pkt->getLE<uint32_t>() : pkt->getLE<uint64_t>(),
            daddr, pkt->getAddr(), pkt->getSize());

    prepareWrite(cpu, index);

    switch (raddr) {
      case Regs::Config:
        changeConfig(pkt->getLE<uint32_t>());
        break;

      case Regs::Command:
        command(pkt->getLE<uint32_t>());
        break;

      case Regs::IntrStatus:
        // Writing 1s clears the corresponding pending bits.
        devIntrClear(regs.IntrStatus &
                pkt->getLE<uint32_t>());
        break;

      case Regs::IntrMask:
        devIntrChangeMask(pkt->getLE<uint32_t>());
        break;

      case Regs::RxData:
        // Post a receive buffer for this vnic.  Only one outstanding
        // request per vnic is allowed.
        if (Regs::get_RxDone_Busy(vnic.RxDone))
            panic("receive machine busy with another request! rxState=%s",
                  RxStateStrings[rxState]);

        vnic.rxUnique = rxUnique++;
        vnic.RxDone = Regs::RxDone_Busy;
        vnic.RxData = pkt->getLE<uint64_t>();
        rxBusyCount++;

        if (Regs::get_RxData_Vaddr(pkt->getLE<uint64_t>())) {
            panic("vtophys not implemented in newmem");
        } else {
            DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d)\n",
                    index, vnic.rxUnique);
        }

        // If the vnic already has a packet mapped it continues on
        // rxBusy; otherwise it queues on rxList for a fresh packet.
        if (vnic.rxIndex == rxFifo.end()) {
            DPRINTF(EthernetPIO, "request new packet...appending to rxList\n");
            rxList.push_back(index);
        } else {
            DPRINTF(EthernetPIO, "packet exists...appending to rxBusy\n");
            rxBusy.push_back(index);
        }

        if (rxEnable && (rxState == rxIdle || rxState == rxFifoBlock)) {
            rxState = rxFifoBlock;
            rxKick();
        }
        break;

      case Regs::TxData:
        // Post a transmit buffer for this vnic; same single-request
        // rule as the receive side.
        if (Regs::get_TxDone_Busy(vnic.TxDone))
            panic("transmit machine busy with another request! txState=%s",
                  TxStateStrings[txState]);

        vnic.txUnique = txUnique++;
        vnic.TxDone = Regs::TxDone_Busy;

        if (Regs::get_TxData_Vaddr(pkt->getLE<uint64_t>())) {
            panic("vtophys won't work here in newmem.\n");
        } else {
            DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d)\n",
                    index, vnic.txUnique);
        }

        if (txList.empty() || txList.front() != index)
            txList.push_back(index);
        if (txEnable && txState == txIdle && txList.front() == index) {
            txState = txFifoBlock;
            txKick();
        }
        break;
    }

    return pioDelay;
}
407
/**
 * Raise device interrupt bits and, if any unmasked bits result,
 * schedule a CPU interrupt (after intrDelay unless Intr_NoDelay is
 * among the bits).  Intr_RxHigh and Intr_TxLow are edge-qualified by
 * the rxEmpty/txFull flags below.
 *
 * @param interrupts bitmask of Regs::Intr_* bits to post
 */
void
Device::devIntrPost(uint32_t interrupts)
{
    if ((interrupts & Regs::Intr_Res))
        panic("Cannot set a reserved interrupt");

    regs.IntrStatus |= interrupts;

    DPRINTF(EthernetIntr,
            "interrupt written to intStatus: intr=%#x status=%#x mask=%#x\n",
            interrupts, regs.IntrStatus, regs.IntrMask);

    interrupts = regs.IntrStatus & regs.IntrMask;

    // Intr_RxHigh is special, we only signal it if we've emptied the fifo
    // and then filled it above the high watermark
    if (rxEmpty)
        rxEmpty = false;
    else
        interrupts &= ~Regs::Intr_RxHigh;

    // Intr_TxLow is special, we only signal it if we've filled up the fifo
    // and then dropped below the low watermark
    if (txFull)
        txFull = false;
    else
        interrupts &= ~Regs::Intr_TxLow;

    if (interrupts) {
        Tick when = curTick();
        if ((interrupts & Regs::Intr_NoDelay) == 0)
            when += intrDelay;
        cpuIntrPost(when);
    }
}
443
444 void
445 Device::devIntrClear(uint32_t interrupts)
446 {
447 if ((interrupts & Regs::Intr_Res))
448 panic("Cannot clear a reserved interrupt");
449
450 regs.IntrStatus &= ~interrupts;
451
452 DPRINTF(EthernetIntr,
453 "interrupt cleared from intStatus: intr=%x status=%x mask=%x\n",
454 interrupts, regs.IntrStatus, regs.IntrMask);
455
456 if (!(regs.IntrStatus & regs.IntrMask))
457 cpuIntrClear();
458 }
459
460 void
461 Device::devIntrChangeMask(uint32_t newmask)
462 {
463 if (regs.IntrMask == newmask)
464 return;
465
466 regs.IntrMask = newmask;
467
468 DPRINTF(EthernetIntr,
469 "interrupt mask changed: intStatus=%x intMask=%x masked=%x\n",
470 regs.IntrStatus, regs.IntrMask, regs.IntrStatus & regs.IntrMask);
471
472 if (regs.IntrStatus & regs.IntrMask)
473 cpuIntrPost(curTick());
474 else
475 cpuIntrClear();
476 }
477
/**
 * Schedule the CPU interrupt for tick @p when.  If an earlier
 * interrupt is already scheduled we simply ride along with it; if a
 * later one is scheduled we squash it and reschedule.  Does nothing
 * when interrupts are disabled.
 *
 * @param when tick at which the interrupt should fire (>= curTick())
 */
void
Base::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick());
    assert(intrTick >= curTick() || intrTick == 0);
    if (!cpuIntrEnable) {
        DPRINTF(EthernetIntr, "interrupts not enabled.\n",
                intrTick);
        return;
    }

    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Clamp to the present; an event cannot be scheduled in the past.
    if (intrTick < curTick()) {
        intrTick = curTick();
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any previously scheduled (later) interrupt event.
    if (intrEvent)
        intrEvent->squash();

    // Auto-deleting event: the wrapper frees itself after it fires.
    intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
                                         name(), true);
    schedule(intrEvent, intrTick);
}
519
520 void
521 Base::cpuInterrupt()
522 {
523 assert(intrTick == curTick());
524
525 // Whether or not there's a pending interrupt, we don't care about
526 // it anymore
527 intrEvent = 0;
528 intrTick = 0;
529
530 // Don't send an interrupt if there's already one
531 if (cpuPendingIntr) {
532 DPRINTF(EthernetIntr,
533 "would send an interrupt now, but there's already pending\n");
534 } else {
535 // Send interrupt
536 cpuPendingIntr = true;
537
538 DPRINTF(EthernetIntr, "posting interrupt\n");
539 intrPost();
540 }
541 }
542
543 void
544 Base::cpuIntrClear()
545 {
546 if (!cpuPendingIntr)
547 return;
548
549 if (intrEvent) {
550 intrEvent->squash();
551 intrEvent = 0;
552 }
553
554 intrTick = 0;
555
556 cpuPendingIntr = false;
557
558 DPRINTF(EthernetIntr, "clearing cchip interrupt\n");
559 intrClear();
560 }
561
/** @return true if a CPU interrupt is currently asserted. */
bool
Base::cpuIntrPending() const
{ return cpuPendingIntr; }
565
/**
 * Apply a new Config register value, reacting only to the bits that
 * actually changed: interrupt enable re-evaluates the CPU interrupt
 * line, and enabling rx/tx kicks the respective state machine.
 *
 * @param newconf new value for the Config register
 */
void
Device::changeConfig(uint32_t newconf)
{
    uint32_t changed = regs.Config ^ newconf;
    if (!changed)
        return;

    regs.Config = newconf;

    if ((changed & Regs::Config_IntEn)) {
        cpuIntrEnable = regs.Config & Regs::Config_IntEn;
        if (cpuIntrEnable) {
            // Deliver anything that became deliverable while disabled.
            if (regs.IntrStatus & regs.IntrMask)
                cpuIntrPost(curTick());
        } else {
            cpuIntrClear();
        }
    }

    if ((changed & Regs::Config_TxEn)) {
        txEnable = regs.Config & Regs::Config_TxEn;
        if (txEnable)
            txKick();
    }

    if ((changed & Regs::Config_RxEn)) {
        rxEnable = regs.Config & Regs::Config_RxEn;
        if (rxEnable)
            rxKick();
    }
}
597
598 void
599 Device::command(uint32_t command)
600 {
601 if (command & Regs::Command_Intr)
602 devIntrPost(Regs::Intr_Soft);
603
604 if (command & Regs::Command_Reset)
605 reset();
606 }
607
/**
 * Restore the device to its power-on state: rebuild the register file
 * from the configuration parameters, drop all queued work, empty both
 * fifos, and unmap every vnic.
 */
void
Device::reset()
{
    using namespace Regs;

    memset(&regs, 0, sizeof(regs));

    // Feature bits come straight from the Python parameters.
    regs.Config = 0;
    if (params()->rx_thread)
        regs.Config |= Config_RxThread;
    if (params()->tx_thread)
        regs.Config |= Config_TxThread;
    if (params()->rss)
        regs.Config |= Config_RSS;
    if (params()->zero_copy)
        regs.Config |= Config_ZeroCopy;
    if (params()->delay_copy)
        regs.Config |= Config_DelayCopy;
    if (params()->virtual_addr)
        regs.Config |= Config_Vaddr;

    // The two copy-avoidance schemes are mutually exclusive.
    if (params()->delay_copy && params()->zero_copy)
        panic("Can't delay copy and zero copy");

    regs.IntrMask = Intr_Soft | Intr_RxHigh | Intr_RxPacket | Intr_TxLow;
    regs.RxMaxCopy = params()->rx_max_copy;
    regs.TxMaxCopy = params()->tx_max_copy;
    regs.ZeroCopySize = params()->zero_copy_size;
    regs.ZeroCopyMark = params()->zero_copy_threshold;
    regs.VirtualCount = params()->virtual_count;
    regs.RxMaxIntr = params()->rx_max_intr;
    regs.RxFifoSize = params()->rx_fifo_size;
    regs.TxFifoSize = params()->tx_fifo_size;
    regs.RxFifoLow = params()->rx_fifo_low_mark;
    regs.TxFifoLow = params()->tx_fifo_threshold;
    regs.RxFifoHigh = params()->rx_fifo_threshold;
    regs.TxFifoHigh = params()->tx_fifo_high_mark;
    regs.HwAddr = params()->hardware_address;

    // Sanity-check the zero-copy configuration.
    if (regs.RxMaxCopy < regs.ZeroCopyMark)
        panic("Must be able to copy at least as many bytes as the threshold");

    if (regs.ZeroCopySize >= regs.ZeroCopyMark)
        panic("The number of bytes to copy must be less than the threshold");

    // Drop all queued vnic work and counters.
    rxList.clear();
    rxBusy.clear();
    rxActive = -1;
    txList.clear();
    rxBusyCount = 0;
    rxDirtyCount = 0;
    rxMappedCount = 0;

    rxState = rxIdle;
    txState = txIdle;

    rxFifo.clear();
    rxFifoPtr = rxFifo.end();
    txFifo.clear();
    rxEmpty = false;
    rxLow = true;
    txFull = false;

    // Rebuild the virtual register file and unmap every vnic.
    int size = virtualRegs.size();
    virtualRegs.clear();
    virtualRegs.resize(size);
    for (int i = 0; i < size; ++i)
        virtualRegs[i].rxIndex = rxFifo.end();
}
677
678 void
679 Device::rxDmaDone()
680 {
681 assert(rxState == rxCopy);
682 rxState = rxCopyDone;
683 DPRINTF(EthernetDMA, "end rx dma write paddr=%#x len=%d\n",
684 rxDmaAddr, rxDmaLen);
685 DDUMP(EthernetData, rxDmaData, rxDmaLen);
686
687 // If the transmit state machine has a pending DMA, let it go first
688 if (txState == txBeginCopy)
689 txKick();
690
691 rxKick();
692 }
693
/**
 * Advance the receive state machine as far as it can go right now.
 *
 * States: rxFifoBlock picks the next vnic to service (preferring
 * vnics continuing a partially copied packet on rxBusy, then fresh
 * requests on rxList) and classifies the packet (IP/TCP/UDP checksum
 * status); rxBeginCopy starts the DMA into guest memory; rxCopy waits
 * for it; rxCopyDone publishes the result in the vnic's RxDone word
 * and posts interrupts.  Loops via goto until it must wait.
 */
void
Device::rxKick()
{
    VirtualReg *vnic = NULL;

    DPRINTF(EthernetSM, "rxKick: rxState=%s (rxFifo.size=%d)\n",
            RxStateStrings[rxState], rxFifo.size());

    // Honor a rate-limiting kick time, if one was set.
    if (rxKickTick > curTick()) {
        DPRINTF(EthernetSM, "rxKick: exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    rxFifo.check();
    if (rxState == rxIdle)
        goto exit;

    // Only rxFifoBlock may run without an active vnic selected.
    if (rxActive == -1) {
        if (rxState != rxFifoBlock)
            panic("no active vnic while in state %s", RxStateStrings[rxState]);

        DPRINTF(EthernetSM, "processing rxState=%s\n",
                RxStateStrings[rxState]);
    } else {
        vnic = &virtualRegs[rxActive];
        DPRINTF(EthernetSM,
                "processing rxState=%s for vnic %d (rxunique %d)\n",
                RxStateStrings[rxState], rxActive, vnic->rxUnique);
    }

    switch (rxState) {
      case rxFifoBlock:
        // Debug-only dump of the mapping status of every vnic.
        if (DTRACE(EthernetSM)) {
            PacketFifo::iterator end = rxFifo.end();
            int size = virtualRegs.size();
            for (int i = 0; i < size; ++i) {
                VirtualReg *vn = &virtualRegs[i];
                bool busy = Regs::get_RxDone_Busy(vn->RxDone);
                if (vn->rxIndex != end) {
#ifndef NDEBUG
                    bool dirty = vn->rxPacketOffset > 0;
                    const char *status;

                    if (busy && dirty)
                        status = "busy,dirty";
                    else if (busy)
                        status = "busy";
                    else if (dirty)
                        status = "dirty";
                    else
                        status = "mapped";

                    DPRINTF(EthernetSM,
                            "vnic %d %s (rxunique %d), packet %d, slack %d\n",
                            i, status, vn->rxUnique,
                            rxFifo.countPacketsBefore(vn->rxIndex),
                            vn->rxIndex->slack);
#endif
                } else if (busy) {
                    DPRINTF(EthernetSM, "vnic %d unmapped (rxunique %d)\n",
                            i, vn->rxUnique);
                }
            }
        }

        // Vnics continuing a partially copied packet get priority.
        if (!rxBusy.empty()) {
            rxActive = rxBusy.front();
            rxBusy.pop_front();
            vnic = &virtualRegs[rxActive];

            if (vnic->rxIndex == rxFifo.end())
                panic("continuing vnic without packet\n");

            DPRINTF(EthernetSM,
                    "continue processing for vnic %d (rxunique %d)\n",
                    rxActive, vnic->rxUnique);

            rxState = rxBeginCopy;

            // Record how far from the fifo head this vnic's packet is
            // (head-of-line blocking measurement).
            int vnic_distance = rxFifo.countPacketsBefore(vnic->rxIndex);
            totalVnicDistance += vnic_distance;
            numVnicDistance += 1;
            if (vnic_distance > _maxVnicDistance) {
                maxVnicDistance = vnic_distance;
                _maxVnicDistance = vnic_distance;
            }

            break;
        }

        // No continuations; a fresh request needs an unclaimed packet.
        if (rxFifoPtr == rxFifo.end()) {
            DPRINTF(EthernetSM, "receive waiting for data. Nothing to do.\n");
            goto exit;
        }

        if (rxList.empty())
            panic("Not idle, but nothing to do!");

        assert(!rxFifo.empty());

        rxActive = rxList.front();
        rxList.pop_front();
        vnic = &virtualRegs[rxActive];

        DPRINTF(EthernetSM,
                "processing new packet for vnic %d (rxunique %d)\n",
                rxActive, vnic->rxUnique);

        // Grab a new packet from the fifo.
        vnic->rxIndex = rxFifoPtr++;
        vnic->rxIndex->priv = rxActive;
        vnic->rxPacketOffset = 0;
        vnic->rxPacketBytes = vnic->rxIndex->packet->length;
        assert(vnic->rxPacketBytes);
        rxMappedCount++;

        // Classify the packet and verify checksums; results are
        // reported to the driver through rxDoneData.
        vnic->rxDoneData = 0;
        /* scope for variables */ {
            IpPtr ip(vnic->rxIndex->packet);
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                vnic->rxDoneData |= Regs::RxDone_IpPacket;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    vnic->rxDoneData |= Regs::RxDone_IpError;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                    vnic->rxDoneData |= Regs::RxDone_TcpPacket;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        vnic->rxDoneData |= Regs::RxDone_TcpError;
                    }
                } else if (udp) {
                    vnic->rxDoneData |= Regs::RxDone_UdpPacket;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        vnic->rxDoneData |= Regs::RxDone_UdpError;
                    }
                }
            }
        }
        rxState = rxBeginCopy;
        break;

      case rxBeginCopy:
        // Can't start a DMA while one is outstanding or while draining.
        if (dmaPending() || drainState() != DrainState::Running)
            goto exit;

        rxDmaAddr = pciToDma(Regs::get_RxData_Addr(vnic->RxData));
        rxDmaLen = min<unsigned>(Regs::get_RxData_Len(vnic->RxData),
                                 vnic->rxPacketBytes);

        /*
         * if we're doing zero/delay copy and we're below the fifo
         * threshold, see if we should try to do the zero/defer copy
         */
        if ((Regs::get_Config_ZeroCopy(regs.Config) ||
             Regs::get_Config_DelayCopy(regs.Config)) &&
            !Regs::get_RxData_NoDelay(vnic->RxData) && rxLow) {
            if (rxDmaLen > regs.ZeroCopyMark)
                rxDmaLen = regs.ZeroCopySize;
        }
        rxDmaData = vnic->rxIndex->packet->data + vnic->rxPacketOffset;
        rxState = rxCopy;
        // Address 1 is a magic "skip the copy" value.
        if (rxDmaAddr == 1LL) {
            rxState = rxCopyDone;
            break;
        }

        dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaEvent, rxDmaData);
        break;

      case rxCopy:
        DPRINTF(EthernetSM, "receive machine still copying\n");
        goto exit;

      case rxCopyDone:
        vnic->RxDone = vnic->rxDoneData;
        vnic->RxDone |= Regs::RxDone_Complete;
        rxBusyCount--;

        if (vnic->rxPacketBytes == rxDmaLen) {
            // Entire remaining packet was copied; unmap the vnic.
            if (vnic->rxPacketOffset)
                rxDirtyCount--;

            // Packet is complete. Indicate how many bytes were copied
            vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone, rxDmaLen);

            DPRINTF(EthernetSM,
                    "rxKick: packet complete on vnic %d (rxunique %d)\n",
                    rxActive, vnic->rxUnique);
            rxFifo.remove(vnic->rxIndex);
            vnic->rxIndex = rxFifo.end();
            rxMappedCount--;
        } else {
            // Partial copy: the vnic stays mapped ("dirty") and the
            // driver must post another RxData to get the rest.
            if (!vnic->rxPacketOffset)
                rxDirtyCount++;

            vnic->rxPacketBytes -= rxDmaLen;
            vnic->rxPacketOffset += rxDmaLen;
            vnic->RxDone |= Regs::RxDone_More;
            vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone,
                                                    vnic->rxPacketBytes);
            DPRINTF(EthernetSM,
                    "rxKick: packet not complete on vnic %d (rxunique %d): "
                    "%d bytes left\n",
                    rxActive, vnic->rxUnique, vnic->rxPacketBytes);
        }

        rxActive = -1;
        rxState = rxBusy.empty() && rxList.empty() ? rxIdle : rxFifoBlock;

        // Arm the edge-triggered Intr_RxHigh condition (see devIntrPost).
        if (rxFifo.empty()) {
            devIntrPost(Regs::Intr_RxEmpty);
            rxEmpty = true;
        }

        // Track the low-watermark hysteresis used by the zero/delay
        // copy decision in rxBeginCopy.
        if (rxFifo.size() < regs.RxFifoLow)
            rxLow = true;

        if (rxFifo.size() > regs.RxFifoHigh)
            rxLow = false;

        devIntrPost(Regs::Intr_RxDMA);
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            RxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            RxStateStrings[rxState]);
}
947
948 void
949 Device::txDmaDone()
950 {
951 assert(txState == txCopy);
952 txState = txCopyDone;
953 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
954 txDmaAddr, txDmaLen);
955 DDUMP(EthernetData, txDmaData, txDmaLen);
956
957 // If the receive state machine has a pending DMA, let it go first
958 if (rxState == rxBeginCopy)
959 rxKick();
960
961 txKick();
962 }
963
/**
 * Try to send the packet at the head of the transmit fifo out over
 * the wire.  On success, updates the tx byte/packet stats and posts
 * Intr_TxPacket (plus Intr_TxLow if the fifo dropped below its low
 * watermark).  Silently returns if the link refuses the packet; a
 * retry will come via transferDone().
 */
void
Device::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    uint32_t interrupts;
    EthPacketPtr packet = txFifo.front();
    if (!interface->sendPacket(packet)) {
        // Link is busy; leave the packet queued for a retry.
        DPRINTF(Ethernet, "Packet Transmit: failed txFifo available %d\n",
                txFifo.avail());
        return;
    }

    txFifo.pop();
#if TRACING_ON
    // Trace-only peek at the IP/TCP headers of what we just sent.
    if (DTRACE(Ethernet)) {
        IpPtr ip(packet);
        if (ip) {
            DPRINTF(Ethernet, "ID is %d\n", ip->id());
            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(Ethernet,
                        "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                        tcp->sport(), tcp->dport(), tcp->seq(),
                        tcp->ack());
            }
        }
    }
#endif

    DDUMP(EthernetData, packet->data, packet->length);
    txBytes += packet->length;
    txPackets++;

    DPRINTF(Ethernet, "Packet Transmit: successful txFifo Available %d\n",
            txFifo.avail());

    interrupts = Regs::Intr_TxPacket;
    if (txFifo.size() < regs.TxFifoLow)
        interrupts |= Regs::Intr_TxLow;
    devIntrPost(interrupts);
}
1009
/**
 * Advance the transmit state machine as far as it can go right now.
 *
 * States: txFifoBlock allocates/extends the staging packet and waits
 * for fifo space; txBeginCopy starts the DMA from guest memory;
 * txCopy waits for it; txCopyDone appends the DMAed bytes, optionally
 * computes IP/TCP/UDP checksums, and pushes the finished packet into
 * the tx fifo.  Loops via goto until it must wait.
 */
void
Device::txKick()
{
    VirtualReg *vnic;
    DPRINTF(EthernetSM, "txKick: txState=%s (txFifo.size=%d)\n",
            TxStateStrings[txState], txFifo.size());

    // Honor a rate-limiting kick time, if one was set.
    if (txKickTick > curTick()) {
        DPRINTF(EthernetSM, "txKick: exiting, can't run till %d\n",
                txKickTick);
        return;
    }

  next:
    if (txState == txIdle)
        goto exit;

    // Transmit requests are serviced strictly in txList order.
    assert(!txList.empty());
    vnic = &virtualRegs[txList.front()];

    switch (txState) {
      case txFifoBlock:
        assert(Regs::get_TxDone_Busy(vnic->TxDone));
        if (!txPacket) {
            // Grab a new packet from the fifo.
            txPacket = make_shared<EthPacketData>(16384);
            txPacketOffset = 0;
        }

        // Wait until the fifo can hold the staged bytes plus this
        // request's DMA length.
        if (txFifo.avail() - txPacket->length <
            Regs::get_TxData_Len(vnic->TxData)) {
            DPRINTF(EthernetSM, "transmit fifo full. Nothing to do.\n");
            goto exit;
        }

        txState = txBeginCopy;
        break;

      case txBeginCopy:
        // Can't start a DMA while one is outstanding or while draining.
        if (dmaPending() || drainState() != DrainState::Running)
            goto exit;

        txDmaAddr = pciToDma(Regs::get_TxData_Addr(vnic->TxData));
        txDmaLen = Regs::get_TxData_Len(vnic->TxData);
        txDmaData = txPacket->data + txPacketOffset;
        txState = txCopy;

        dmaRead(txDmaAddr, txDmaLen, &txDmaEvent, txDmaData);
        break;

      case txCopy:
        DPRINTF(EthernetSM, "transmit machine still copying\n");
        goto exit;

      case txCopyDone:
        vnic->TxDone = txDmaLen | Regs::TxDone_Complete;
        txPacket->simLength += txDmaLen;
        txPacket->length += txDmaLen;
        // TxData_More means the driver will send further fragments of
        // this packet; keep staging and go idle until the next TxData.
        if ((vnic->TxData & Regs::TxData_More)) {
            txPacketOffset += txDmaLen;
            txState = txIdle;
            devIntrPost(Regs::Intr_TxDMA);
            break;
        }

        assert(txPacket->length <= txFifo.avail());
        // Optional checksum offload: fill in TCP/UDP then IP sums.
        if ((vnic->TxData & Regs::TxData_Checksum)) {
            IpPtr ip(txPacket);
            if (ip) {
                TcpPtr tcp(ip);
                if (tcp) {
                    tcp->sum(0);
                    tcp->sum(cksum(tcp));
                    txTcpChecksums++;
                }

                UdpPtr udp(ip);
                if (udp) {
                    udp->sum(0);
                    udp->sum(cksum(udp));
                    txUdpChecksums++;
                }

                ip->sum(0);
                ip->sum(cksum(ip));
                txIpChecksums++;
            }
        }

        txFifo.push(txPacket);
        // Arm the edge-triggered Intr_TxLow condition (see devIntrPost).
        if (txFifo.avail() < regs.TxMaxCopy) {
            devIntrPost(Regs::Intr_TxFull);
            txFull = true;
        }
        txPacket = 0;
        transmit();
        txList.pop_front();
        txState = txList.empty() ? txIdle : txFifoBlock;
        devIntrPost(Regs::Intr_TxDMA);
        break;

      default:
        panic("Invalid txState!");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            TxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            TxStateStrings[txState]);
}
1127
1128 void
1129 Device::transferDone()
1130 {
1131 if (txFifo.empty()) {
1132 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1133 return;
1134 }
1135
1136 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
1137
1138 reschedule(txEvent, clockEdge(Cycles(1)), true);
1139 }
1140
1141 bool
1142 Device::rxFilter(const EthPacketPtr &packet)
1143 {
1144 if (!Regs::get_Config_Filter(regs.Config))
1145 return false;
1146
1147 panic("receive filter not implemented\n");
1148 bool drop = true;
1149 return drop;
1150 }
1151
/**
 * Accept a packet arriving from the wire.  Drops it (while still
 * returning true, i.e. "consumed") when receive is disabled or the
 * filter rejects it; returns false only when the rx fifo is full so
 * the link can retry.
 *
 * @return true if the packet was consumed (even if dropped)
 */
bool
Device::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifo Available is %d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    // Crossing the high watermark may raise the (edge-qualified)
    // Intr_RxHigh interrupt; see devIntrPost.
    if (rxFifo.size() >= regs.RxFifoHigh)
        devIntrPost(Regs::Intr_RxHigh);

    if (!rxFifo.push(packet)) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        return false;
    }

    // If we were at the last element, back up one to go to the new
    // last element of the list.
    if (rxFifoPtr == rxFifo.end())
        --rxFifoPtr;

    devIntrPost(Regs::Intr_RxPacket);
    rxKick();
    return true;
}
1189
/**
 * Resume after a drain: kick both state machines so any work that
 * stalled during the drain gets going again immediately.
 */
void
Device::drainResume()
{
    Drainable::drainResume();

    // During drain we could have left the state machines in a waiting
    // state and they wouldn't get out until some other event occurred
    // to kick them. This way they'll get out immediately.
    txKick();
    rxKick();
}
1201
1202 //=====================================================================
1203 //
1204 //
/**
 * Checkpoint the base device state: enable flags plus everything
 * needed to recreate a pending/scheduled interrupt on restore.
 */
void
Base::serialize(CheckpointOut &cp) const
{
    // Serialize the PciDevice base class
    PciDevice::serialize(cp);

    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(cpuIntrEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // A tick of 0 encodes "no interrupt event scheduled".
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);
}
1225
void
Base::unserialize(CheckpointIn &cp)
{
    // Restore the state saved by Base::serialize().

    // Unserialize the PciDevice base class
    PciDevice::unserialize(cp);

    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(cpuIntrEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    // A non-zero tick means an interrupt event was scheduled when the
    // checkpoint was taken; recreate the event and schedule it at the
    // same tick.
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        // NOTE(review): the trailing 'true' ctor argument presumably
        // marks the event for auto-deletion once processed -- confirm
        // against EventFunctionWrapper's constructor.
        intrEvent = new EventFunctionWrapper([this]{ cpuInterrupt(); },
                                             name(), true);
        schedule(intrEvent, intrEventTick);
    }
}
1249
1250 void
1251 Device::serialize(CheckpointOut &cp) const
1252 {
1253 int count;
1254
1255 // Serialize the PciDevice base class
1256 Base::serialize(cp);
1257
1258 if (rxState == rxCopy)
1259 panic("can't serialize with an in flight dma request rxState=%s",
1260 RxStateStrings[rxState]);
1261
1262 if (txState == txCopy)
1263 panic("can't serialize with an in flight dma request txState=%s",
1264 TxStateStrings[txState]);
1265
1266 /*
1267 * Serialize the device registers that could be modified by the OS.
1268 */
1269 SERIALIZE_SCALAR(regs.Config);
1270 SERIALIZE_SCALAR(regs.IntrStatus);
1271 SERIALIZE_SCALAR(regs.IntrMask);
1272 SERIALIZE_SCALAR(regs.RxData);
1273 SERIALIZE_SCALAR(regs.TxData);
1274
1275 /*
1276 * Serialize the virtual nic state
1277 */
1278 int virtualRegsSize = virtualRegs.size();
1279 SERIALIZE_SCALAR(virtualRegsSize);
1280 for (int i = 0; i < virtualRegsSize; ++i) {
1281 const VirtualReg *vnic = &virtualRegs[i];
1282
1283 std::string reg = csprintf("vnic%d", i);
1284 paramOut(cp, reg + ".RxData", vnic->RxData);
1285 paramOut(cp, reg + ".RxDone", vnic->RxDone);
1286 paramOut(cp, reg + ".TxData", vnic->TxData);
1287 paramOut(cp, reg + ".TxDone", vnic->TxDone);
1288
1289 bool rxPacketExists = vnic->rxIndex != rxFifo.end();
1290 paramOut(cp, reg + ".rxPacketExists", rxPacketExists);
1291 if (rxPacketExists) {
1292 int rxPacket = 0;
1293 auto i = rxFifo.begin();
1294 while (i != vnic->rxIndex) {
1295 assert(i != rxFifo.end());
1296 ++i;
1297 ++rxPacket;
1298 }
1299
1300 paramOut(cp, reg + ".rxPacket", rxPacket);
1301 paramOut(cp, reg + ".rxPacketOffset", vnic->rxPacketOffset);
1302 paramOut(cp, reg + ".rxPacketBytes", vnic->rxPacketBytes);
1303 }
1304 paramOut(cp, reg + ".rxDoneData", vnic->rxDoneData);
1305 }
1306
1307 int rxFifoPtr = -1;
1308 if (this->rxFifoPtr != rxFifo.end())
1309 rxFifoPtr = rxFifo.countPacketsBefore(this->rxFifoPtr);
1310 SERIALIZE_SCALAR(rxFifoPtr);
1311
1312 SERIALIZE_SCALAR(rxActive);
1313 SERIALIZE_SCALAR(rxBusyCount);
1314 SERIALIZE_SCALAR(rxDirtyCount);
1315 SERIALIZE_SCALAR(rxMappedCount);
1316
1317 VirtualList::const_iterator i, end;
1318 for (count = 0, i = rxList.begin(), end = rxList.end(); i != end; ++i)
1319 paramOut(cp, csprintf("rxList%d", count++), *i);
1320 int rxListSize = count;
1321 SERIALIZE_SCALAR(rxListSize);
1322
1323 for (count = 0, i = rxBusy.begin(), end = rxBusy.end(); i != end; ++i)
1324 paramOut(cp, csprintf("rxBusy%d", count++), *i);
1325 int rxBusySize = count;
1326 SERIALIZE_SCALAR(rxBusySize);
1327
1328 for (count = 0, i = txList.begin(), end = txList.end(); i != end; ++i)
1329 paramOut(cp, csprintf("txList%d", count++), *i);
1330 int txListSize = count;
1331 SERIALIZE_SCALAR(txListSize);
1332
1333 /*
1334 * Serialize rx state machine
1335 */
1336 int rxState = this->rxState;
1337 SERIALIZE_SCALAR(rxState);
1338 SERIALIZE_SCALAR(rxEmpty);
1339 SERIALIZE_SCALAR(rxLow);
1340 rxFifo.serialize("rxFifo", cp);
1341
1342 /*
1343 * Serialize tx state machine
1344 */
1345 int txState = this->txState;
1346 SERIALIZE_SCALAR(txState);
1347 SERIALIZE_SCALAR(txFull);
1348 txFifo.serialize("txFifo", cp);
1349 bool txPacketExists = txPacket != nullptr;
1350 SERIALIZE_SCALAR(txPacketExists);
1351 if (txPacketExists) {
1352 txPacket->serialize("txPacket", cp);
1353 SERIALIZE_SCALAR(txPacketOffset);
1354 SERIALIZE_SCALAR(txPacketBytes);
1355 }
1356
1357 /*
1358 * If there's a pending transmit, store the time so we can
1359 * reschedule it later
1360 */
1361 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
1362 SERIALIZE_SCALAR(transmitTick);
1363 }
1364
void
Device::unserialize(CheckpointIn &cp)
{
    // Restore the state written by Device::serialize().  Ordering is
    // significant: the rxFifo must be repopulated before rxFifoPtr and
    // the per-vnic rxIndex iterators are rebuilt from their saved
    // packet indices.

    // Unserialize the PciDevice base class
    Base::unserialize(cp);

    /*
     * Unserialize the device registers that may have been written by the OS.
     */
    UNSERIALIZE_SCALAR(regs.Config);
    UNSERIALIZE_SCALAR(regs.IntrStatus);
    UNSERIALIZE_SCALAR(regs.IntrMask);
    UNSERIALIZE_SCALAR(regs.RxData);
    UNSERIALIZE_SCALAR(regs.TxData);

    UNSERIALIZE_SCALAR(rxActive);
    UNSERIALIZE_SCALAR(rxBusyCount);
    UNSERIALIZE_SCALAR(rxDirtyCount);
    UNSERIALIZE_SCALAR(rxMappedCount);

    // Rebuild the three vnic work lists from their indexed scalars.
    int rxListSize;
    UNSERIALIZE_SCALAR(rxListSize);
    rxList.clear();
    for (int i = 0; i < rxListSize; ++i) {
        int value;
        paramIn(cp, csprintf("rxList%d", i), value);
        rxList.push_back(value);
    }

    int rxBusySize;
    UNSERIALIZE_SCALAR(rxBusySize);
    rxBusy.clear();
    for (int i = 0; i < rxBusySize; ++i) {
        int value;
        paramIn(cp, csprintf("rxBusy%d", i), value);
        rxBusy.push_back(value);
    }

    int txListSize;
    UNSERIALIZE_SCALAR(txListSize);
    txList.clear();
    for (int i = 0; i < txListSize; ++i) {
        int value;
        paramIn(cp, csprintf("txList%d", i), value);
        txList.push_back(value);
    }

    /*
     * Unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    UNSERIALIZE_SCALAR(rxEmpty);
    UNSERIALIZE_SCALAR(rxLow);
    this->rxState = (RxState) rxState;
    rxFifo.unserialize("rxFifo", cp);

    // rxFifoPtr was stored as a packet index; -1 encodes "end".
    // Advance a fresh iterator that many steps to reconstruct it.
    int rxFifoPtr;
    UNSERIALIZE_SCALAR(rxFifoPtr);
    if (rxFifoPtr >= 0) {
        this->rxFifoPtr = rxFifo.begin();
        for (int i = 0; i < rxFifoPtr; ++i)
            ++this->rxFifoPtr;
    } else {
        this->rxFifoPtr = rxFifo.end();
    }

    /*
     * Unserialize tx state machine
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    UNSERIALIZE_SCALAR(txFull);
    this->txState = (TxState) txState;
    txFifo.unserialize("txFifo", cp);
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    txPacket = 0;
    if (txPacketExists) {
        // NOTE(review): 16384 appears to be the assumed maximum packet
        // buffer size -- confirm it matches the tx packet allocation
        // used elsewhere in this device.
        txPacket = make_shared<EthPacketData>(16384);
        txPacket->unserialize("txPacket", cp);
        UNSERIALIZE_SCALAR(txPacketOffset);
        UNSERIALIZE_SCALAR(txPacketBytes);
    }

    /*
     * unserialize the virtual nic registers/state
     *
     * this must be done after the unserialization of the rxFifo
     * because the packet iterators depend on the fifo being populated
     */
    int virtualRegsSize;
    UNSERIALIZE_SCALAR(virtualRegsSize);
    virtualRegs.clear();
    virtualRegs.resize(virtualRegsSize);
    for (int i = 0; i < virtualRegsSize; ++i) {
        VirtualReg *vnic = &virtualRegs[i];
        std::string reg = csprintf("vnic%d", i);

        paramIn(cp, reg + ".RxData", vnic->RxData);
        paramIn(cp, reg + ".RxDone", vnic->RxDone);
        paramIn(cp, reg + ".TxData", vnic->TxData);
        paramIn(cp, reg + ".TxDone", vnic->TxDone);

        // Freshly created vnics get new unique ids rather than
        // checkpointed ones.
        vnic->rxUnique = rxUnique++;
        vnic->txUnique = txUnique++;

        bool rxPacketExists;
        paramIn(cp, reg + ".rxPacketExists", rxPacketExists);
        if (rxPacketExists) {
            // Rebuild the vnic's fifo iterator from its saved packet
            // index by stepping from begin().
            int rxPacket;
            paramIn(cp, reg + ".rxPacket", rxPacket);
            vnic->rxIndex = rxFifo.begin();
            while (rxPacket--)
                ++vnic->rxIndex;

            paramIn(cp, reg + ".rxPacketOffset",
                    vnic->rxPacketOffset);
            paramIn(cp, reg + ".rxPacketBytes", vnic->rxPacketBytes);
        } else {
            vnic->rxIndex = rxFifo.end();
        }
        paramIn(cp, reg + ".rxDoneData", vnic->rxDoneData);
    }

    /*
     * If there's a pending transmit, reschedule it now
     */
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        schedule(txEvent, curTick() + transmitTick);

    pioPort.sendRangeChange();

}
1501
1502 } // namespace Sinic
1503
1504 Sinic::Device *
1505 SinicParams::create()
1506 {
1507 return new Sinic::Device(this);
1508 }