mem: Fix guest corruption when caches handle uncacheable accesses
[gem5.git] / src / dev / sinic.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 */
30
31 #include <deque>
32 #include <limits>
33 #include <string>
34
35 #include "arch/vtophys.hh"
36 #include "base/compiler.hh"
37 #include "base/debug.hh"
38 #include "base/inet.hh"
39 #include "base/types.hh"
40 #include "config/the_isa.hh"
41 #include "cpu/intr_control.hh"
42 #include "cpu/thread_context.hh"
43 #include "debug/EthernetAll.hh"
44 #include "dev/etherlink.hh"
45 #include "dev/sinic.hh"
46 #include "mem/packet.hh"
47 #include "mem/packet_access.hh"
48 #include "sim/eventq.hh"
49 #include "sim/stats.hh"
50
51 using namespace std;
52 using namespace Net;
53 using namespace TheISA;
54
55 namespace Sinic {
56
// Printable names for the receive state machine states; indexed by
// rxState for DPRINTF/panic messages (see rxKick). The order must
// match the rxState enum values used below.
const char *RxStateStrings[] =
{
    "rxIdle",
    "rxFifoBlock",
    "rxBeginCopy",
    "rxCopy",
    "rxCopyDone"
};
65
// Printable names for the transmit state machine states; indexed by
// txState for DPRINTF/panic messages (see txKick). The order must
// match the txState enum values used below.
const char *TxStateStrings[] =
{
    "txIdle",
    "txFifoBlock",
    "txBeginCopy",
    "txCopy",
    "txCopyDone"
};
74
75
76 ///////////////////////////////////////////////////////////////////////
77 //
78 // Sinic PCI Device
79 //
// Base device constructor: rx/tx start disabled, no interrupt is
// pending or scheduled, and the Ethernet interface pointer starts
// null (set up by the derived Device).
Base::Base(const Params *p)
    : EtherDevBase(p), rxEnable(false), txEnable(false),
      intrDelay(p->intr_delay), intrTick(0), cpuIntrEnable(false),
      cpuPendingIntr(false), intrEvent(0), interface(NULL)
{
}
86
// Device constructor: sizes the vnic register file (forcing at least
// one entry), the rx/tx fifos, and the DMA delay/factor parameters,
// then creates the Ethernet interface and resets all register state.
Device::Device(const Params *p)
    : Base(p), rxUnique(0), txUnique(0),
      virtualRegs(p->virtual_count < 1 ? 1 : p->virtual_count),
      rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxDmaEvent(this), txDmaEvent(this),
      dmaReadDelay(p->dma_read_delay), dmaReadFactor(p->dma_read_factor),
      dmaWriteDelay(p->dma_write_delay), dmaWriteFactor(p->dma_write_factor)
{
    interface = new Interface(name() + ".int0", this);
    reset();

}
100
// NOTE(review): 'interface' allocated in the constructor is never
// freed here; it lives for the duration of the simulation.
Device::~Device()
{}
103
/**
 * Register device statistics.  Beyond the base-class stats, this
 * tracks "vnic distance" -- how far into the rx fifo a vnic's packet
 * sits when it is serviced (updated in rxKick): maximum, total,
 * sample count, and a derived average formula.
 */
void
Device::regStats()
{
    Base::regStats();

    // Plain shadow of the running maximum; the maxVnicDistance stat
    // is assigned from it whenever a new maximum is seen in rxKick.
    _maxVnicDistance = 0;

    maxVnicDistance
        .name(name() + ".maxVnicDistance")
        .desc("maximum vnic distance")
        ;

    totalVnicDistance
        .name(name() + ".totalVnicDistance")
        .desc("total vnic distance")
        ;
    numVnicDistance
        .name(name() + ".numVnicDistance")
        .desc("number of vnic distance measurements")
        ;

    avgVnicDistance
        .name(name() + ".avgVnicDistance")
        .desc("average vnic distance")
        ;

    // Formula stat: evaluated lazily at stat-dump time.
    avgVnicDistance = totalVnicDistance / numVnicDistance;
}
132
133 void
134 Device::resetStats()
135 {
136 Base::resetStats();
137
138 _maxVnicDistance = 0;
139 }
140
141 EtherInt*
142 Device::getEthPort(const std::string &if_name, int idx)
143 {
144 if (if_name == "interface") {
145 if (interface->getPeer())
146 panic("interface already connected to\n");
147
148 return interface;
149 }
150 return NULL;
151 }
152
153
154 void
155 Device::prepareIO(int cpu, int index)
156 {
157 int size = virtualRegs.size();
158 if (index > size)
159 panic("Trying to access a vnic that doesn't exist %d > %d\n",
160 index, size);
161 }
162
163 //add stats for head of line blocking
164 //add stats for average fifo length
165 //add stats for average number of vnics busy
166
167 void
168 Device::prepareRead(int cpu, int index)
169 {
170 using namespace Regs;
171 prepareIO(cpu, index);
172
173 VirtualReg &vnic = virtualRegs[index];
174
175 // update rx registers
176 uint64_t rxdone = vnic.RxDone;
177 rxdone = set_RxDone_Packets(rxdone, rxFifo.countPacketsAfter(rxFifoPtr));
178 rxdone = set_RxDone_Empty(rxdone, rxFifo.empty());
179 rxdone = set_RxDone_High(rxdone, rxFifo.size() > regs.RxFifoHigh);
180 rxdone = set_RxDone_NotHigh(rxdone, rxLow);
181 regs.RxData = vnic.RxData;
182 regs.RxDone = rxdone;
183 regs.RxWait = rxdone;
184
185 // update tx regsiters
186 uint64_t txdone = vnic.TxDone;
187 txdone = set_TxDone_Packets(txdone, txFifo.packets());
188 txdone = set_TxDone_Full(txdone, txFifo.avail() < regs.TxMaxCopy);
189 txdone = set_TxDone_Low(txdone, txFifo.size() < regs.TxFifoLow);
190 regs.TxData = vnic.TxData;
191 regs.TxDone = txdone;
192 regs.TxWait = txdone;
193
194 int head = 0xffff;
195
196 if (!rxFifo.empty()) {
197 int vnic = rxFifo.begin()->priv;
198 if (vnic != -1 && virtualRegs[vnic].rxPacketOffset > 0)
199 head = vnic;
200 }
201
202 regs.RxStatus = set_RxStatus_Head(regs.RxStatus, head);
203 regs.RxStatus = set_RxStatus_Busy(regs.RxStatus, rxBusyCount);
204 regs.RxStatus = set_RxStatus_Mapped(regs.RxStatus, rxMappedCount);
205 regs.RxStatus = set_RxStatus_Dirty(regs.RxStatus, rxDirtyCount);
206 }
207
208 void
209 Device::prepareWrite(int cpu, int index)
210 {
211 prepareIO(cpu, index);
212 }
213
214 /**
215 * I/O read of device register
216 */
217 Tick
218 Device::read(PacketPtr pkt)
219 {
220 assert(config.command & PCI_CMD_MSE);
221 assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]);
222
223 int cpu = pkt->req->contextId();
224 Addr daddr = pkt->getAddr() - BARAddrs[0];
225 Addr index = daddr >> Regs::VirtualShift;
226 Addr raddr = daddr & Regs::VirtualMask;
227
228 pkt->allocate();
229
230 if (!regValid(raddr))
231 panic("invalid register: cpu=%d vnic=%d da=%#x pa=%#x size=%d",
232 cpu, index, daddr, pkt->getAddr(), pkt->getSize());
233
234 const Regs::Info &info = regInfo(raddr);
235 if (!info.read)
236 panic("read %s (write only): "
237 "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
238 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());
239
240 panic("read %s (invalid size): "
241 "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
242 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());
243
244 prepareRead(cpu, index);
245
246 uint64_t value M5_VAR_USED = 0;
247 if (pkt->getSize() == 4) {
248 uint32_t reg = regData32(raddr);
249 pkt->set(reg);
250 value = reg;
251 }
252
253 if (pkt->getSize() == 8) {
254 uint64_t reg = regData64(raddr);
255 pkt->set(reg);
256 value = reg;
257 }
258
259 DPRINTF(EthernetPIO,
260 "read %s: cpu=%d vnic=%d da=%#x pa=%#x size=%d val=%#x\n",
261 info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize(), value);
262
263 // reading the interrupt status register has the side effect of
264 // clearing it
265 if (raddr == Regs::IntrStatus)
266 devIntrClear();
267
268 return pioDelay;
269 }
270
271 /**
272 * IPR read of device register
273
274 Fault
275 Device::iprRead(Addr daddr, int cpu, uint64_t &result)
276 {
277 if (!regValid(daddr))
278 panic("invalid address: da=%#x", daddr);
279
280 const Regs::Info &info = regInfo(daddr);
281 if (!info.read)
282 panic("reading %s (write only): cpu=%d da=%#x", info.name, cpu, daddr);
283
284 DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x\n",
285 info.name, cpu, daddr);
286
287 prepareRead(cpu, 0);
288
289 if (info.size == 4)
290 result = regData32(daddr);
291
292 if (info.size == 8)
293 result = regData64(daddr);
294
    DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x val=%#x\n",
            info.name, cpu, daddr, result);
297
298 return NoFault;
299 }
300 */
/**
 * Handle a PIO write to a device register.
 *
 * Decodes the CPU, vnic index, and register offset from the packet
 * address, validates the access, then dispatches on the register.
 * Writes to RxData/TxData queue work for the receive/transmit state
 * machines and kick them if they are able to run.
 *
 * @param pkt the write request packet
 * @return the PIO latency of the access
 */
Tick
Device::write(PacketPtr pkt)
{
    assert(config.command & PCI_CMD_MSE);
    assert(pkt->getAddr() >= BARAddrs[0] && pkt->getSize() < BARSize[0]);

    // Decode which CPU, vnic, and register this access targets.
    int cpu = pkt->req->contextId();
    Addr daddr = pkt->getAddr() - BARAddrs[0];
    Addr index = daddr >> Regs::VirtualShift;
    Addr raddr = daddr & Regs::VirtualMask;

    if (!regValid(raddr))
        panic("invalid register: cpu=%d, da=%#x pa=%#x size=%d",
              cpu, daddr, pkt->getAddr(), pkt->getSize());

    const Regs::Info &info = regInfo(raddr);
    if (!info.write)
        panic("write %s (read only): "
              "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
              info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());

    // Writes must exactly match the register's declared size.
    if (pkt->getSize() != info.size)
        panic("write %s (invalid size): "
              "cpu=%d vnic=%d da=%#x pa=%#x size=%d",
              info.name, cpu, index, daddr, pkt->getAddr(), pkt->getSize());

    VirtualReg &vnic = virtualRegs[index];

    DPRINTF(EthernetPIO,
            "write %s vnic %d: cpu=%d val=%#x da=%#x pa=%#x size=%d\n",
            info.name, index, cpu, info.size == 4 ? pkt->get<uint32_t>() :
            pkt->get<uint64_t>(), daddr, pkt->getAddr(), pkt->getSize());

    prepareWrite(cpu, index);

    switch (raddr) {
      case Regs::Config:
        changeConfig(pkt->get<uint32_t>());
        break;

      case Regs::Command:
        command(pkt->get<uint32_t>());
        break;

      case Regs::IntrStatus:
        // Write-one-to-clear semantics: only clear bits that are both
        // set in the status register and in the written value.
        devIntrClear(regs.IntrStatus & pkt->get<uint32_t>());
        break;

      case Regs::IntrMask:
        devIntrChangeMask(pkt->get<uint32_t>());
        break;

      case Regs::RxData:
        // Driver requests a receive DMA: record the buffer descriptor
        // and queue this vnic for the rx state machine.
        if (Regs::get_RxDone_Busy(vnic.RxDone))
            panic("receive machine busy with another request! rxState=%s",
                  RxStateStrings[rxState]);

        vnic.rxUnique = rxUnique++;
        vnic.RxDone = Regs::RxDone_Busy;
        vnic.RxData = pkt->get<uint64_t>();
        rxBusyCount++;

        if (Regs::get_RxData_Vaddr(pkt->get<uint64_t>())) {
            // Virtual-address descriptors are unsupported here.
            panic("vtophys not implemented in newmem");
#ifdef SINIC_VTOPHYS
            Addr vaddr = Regs::get_RxData_Addr(reg64);
            Addr paddr = vtophys(req->xc, vaddr);
            DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d): "
                    "vaddr=%#x, paddr=%#x\n",
                    index, vnic.rxUnique, vaddr, paddr);

            vnic.RxData = Regs::set_RxData_Addr(vnic.RxData, paddr);
#endif
        } else {
            DPRINTF(EthernetPIO, "write RxData vnic %d (rxunique %d)\n",
                    index, vnic.rxUnique);
        }

        // A vnic with a packet already mapped continues on rxBusy;
        // one without a packet waits for a new one on rxList.
        if (vnic.rxIndex == rxFifo.end()) {
            DPRINTF(EthernetPIO, "request new packet...appending to rxList\n");
            rxList.push_back(index);
        } else {
            DPRINTF(EthernetPIO, "packet exists...appending to rxBusy\n");
            rxBusy.push_back(index);
        }

        if (rxEnable && (rxState == rxIdle || rxState == rxFifoBlock)) {
            rxState = rxFifoBlock;
            rxKick();
        }
        break;

      case Regs::TxData:
        // Driver requests a transmit DMA: record the descriptor and
        // queue this vnic for the tx state machine.
        if (Regs::get_TxDone_Busy(vnic.TxDone))
            panic("transmit machine busy with another request! txState=%s",
                  TxStateStrings[txState]);

        vnic.txUnique = txUnique++;
        vnic.TxDone = Regs::TxDone_Busy;

        if (Regs::get_TxData_Vaddr(pkt->get<uint64_t>())) {
            panic("vtophys won't work here in newmem.\n");
#ifdef SINIC_VTOPHYS
            Addr vaddr = Regs::get_TxData_Addr(reg64);
            Addr paddr = vtophys(req->xc, vaddr);
            DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d): "
                    "vaddr=%#x, paddr=%#x\n",
                    index, vnic.txUnique, vaddr, paddr);

            vnic.TxData = Regs::set_TxData_Addr(vnic.TxData, paddr);
#endif
        } else {
            DPRINTF(EthernetPIO, "write TxData vnic %d (txunique %d)\n",
                    index, vnic.txUnique);
        }

        // Avoid queueing the same vnic twice in a row (multi-segment
        // packets write TxData repeatedly while at the list head).
        if (txList.empty() || txList.front() != index)
            txList.push_back(index);
        if (txEnable && txState == txIdle && txList.front() == index) {
            txState = txFifoBlock;
            txKick();
        }
        break;
    }

    return pioDelay;
}
431
432 void
433 Device::devIntrPost(uint32_t interrupts)
434 {
435 if ((interrupts & Regs::Intr_Res))
436 panic("Cannot set a reserved interrupt");
437
438 regs.IntrStatus |= interrupts;
439
440 DPRINTF(EthernetIntr,
441 "interrupt written to intStatus: intr=%#x status=%#x mask=%#x\n",
442 interrupts, regs.IntrStatus, regs.IntrMask);
443
444 interrupts = regs.IntrStatus & regs.IntrMask;
445
446 // Intr_RxHigh is special, we only signal it if we've emptied the fifo
447 // and then filled it above the high watermark
448 if (rxEmpty)
449 rxEmpty = false;
450 else
451 interrupts &= ~Regs::Intr_RxHigh;
452
453 // Intr_TxLow is special, we only signal it if we've filled up the fifo
454 // and then dropped below the low watermark
455 if (txFull)
456 txFull = false;
457 else
458 interrupts &= ~Regs::Intr_TxLow;
459
460 if (interrupts) {
461 Tick when = curTick();
462 if ((interrupts & Regs::Intr_NoDelay) == 0)
463 when += intrDelay;
464 cpuIntrPost(when);
465 }
466 }
467
468 void
469 Device::devIntrClear(uint32_t interrupts)
470 {
471 if ((interrupts & Regs::Intr_Res))
472 panic("Cannot clear a reserved interrupt");
473
474 regs.IntrStatus &= ~interrupts;
475
476 DPRINTF(EthernetIntr,
477 "interrupt cleared from intStatus: intr=%x status=%x mask=%x\n",
478 interrupts, regs.IntrStatus, regs.IntrMask);
479
480 if (!(regs.IntrStatus & regs.IntrMask))
481 cpuIntrClear();
482 }
483
484 void
485 Device::devIntrChangeMask(uint32_t newmask)
486 {
487 if (regs.IntrMask == newmask)
488 return;
489
490 regs.IntrMask = newmask;
491
492 DPRINTF(EthernetIntr,
493 "interrupt mask changed: intStatus=%x intMask=%x masked=%x\n",
494 regs.IntrStatus, regs.IntrMask, regs.IntrStatus & regs.IntrMask);
495
496 if (regs.IntrStatus & regs.IntrMask)
497 cpuIntrPost(curTick());
498 else
499 cpuIntrClear();
500 }
501
/**
 * Schedule delivery of an interrupt to the CPU at tick @p when.
 *
 * If an earlier delivery is already scheduled, this request is
 * subsumed by it; if a later one is scheduled, that event is
 * squashed and replaced with this earlier one.  Does nothing when
 * CPU interrupts are disabled.
 */
void
Base::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick());
    assert(intrTick >= curTick() || intrTick == 0);
    if (!cpuIntrEnable) {
        DPRINTF(EthernetIntr, "interrupts not enabled.\n",
                intrTick);
        return;
    }

    // An earlier (nonzero) scheduled tick already covers this request.
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick()) {
        Debug::breakpoint();
        intrTick = curTick();
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Squash (don't delete) any previously scheduled event and
    // replace it. NOTE(review): the 'true' ctor argument presumably
    // makes IntrEvent auto-deleting -- confirm against the header.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    schedule(intrEvent, intrTick);
}
542
543 void
544 Base::cpuInterrupt()
545 {
546 assert(intrTick == curTick());
547
548 // Whether or not there's a pending interrupt, we don't care about
549 // it anymore
550 intrEvent = 0;
551 intrTick = 0;
552
553 // Don't send an interrupt if there's already one
554 if (cpuPendingIntr) {
555 DPRINTF(EthernetIntr,
556 "would send an interrupt now, but there's already pending\n");
557 } else {
558 // Send interrupt
559 cpuPendingIntr = true;
560
561 DPRINTF(EthernetIntr, "posting interrupt\n");
562 intrPost();
563 }
564 }
565
566 void
567 Base::cpuIntrClear()
568 {
569 if (!cpuPendingIntr)
570 return;
571
572 if (intrEvent) {
573 intrEvent->squash();
574 intrEvent = 0;
575 }
576
577 intrTick = 0;
578
579 cpuPendingIntr = false;
580
581 DPRINTF(EthernetIntr, "clearing cchip interrupt\n");
582 intrClear();
583 }
584
585 bool
586 Base::cpuIntrPending() const
587 { return cpuPendingIntr; }
588
589 void
590 Device::changeConfig(uint32_t newconf)
591 {
592 uint32_t changed = regs.Config ^ newconf;
593 if (!changed)
594 return;
595
596 regs.Config = newconf;
597
598 if ((changed & Regs::Config_IntEn)) {
599 cpuIntrEnable = regs.Config & Regs::Config_IntEn;
600 if (cpuIntrEnable) {
601 if (regs.IntrStatus & regs.IntrMask)
602 cpuIntrPost(curTick());
603 } else {
604 cpuIntrClear();
605 }
606 }
607
608 if ((changed & Regs::Config_TxEn)) {
609 txEnable = regs.Config & Regs::Config_TxEn;
610 if (txEnable)
611 txKick();
612 }
613
614 if ((changed & Regs::Config_RxEn)) {
615 rxEnable = regs.Config & Regs::Config_RxEn;
616 if (rxEnable)
617 rxKick();
618 }
619 }
620
621 void
622 Device::command(uint32_t command)
623 {
624 if (command & Regs::Command_Intr)
625 devIntrPost(Regs::Intr_Soft);
626
627 if (command & Regs::Command_Reset)
628 reset();
629 }
630
/**
 * Reset all device state to its power-on values: registers derived
 * from the configuration parameters, both fifos, the rx/tx state
 * machines, and the per-vnic virtual register sets.
 */
void
Device::reset()
{
    using namespace Regs;

    memset(&regs, 0, sizeof(regs));

    // Build the Config register from the simulation parameters.
    regs.Config = 0;
    if (params()->rx_thread)
        regs.Config |= Config_RxThread;
    if (params()->tx_thread)
        regs.Config |= Config_TxThread;
    if (params()->rss)
        regs.Config |= Config_RSS;
    if (params()->zero_copy)
        regs.Config |= Config_ZeroCopy;
    if (params()->delay_copy)
        regs.Config |= Config_DelayCopy;
    if (params()->virtual_addr)
        regs.Config |= Config_Vaddr;

    if (params()->delay_copy && params()->zero_copy)
        panic("Can't delay copy and zero copy");

    regs.IntrMask = Intr_Soft | Intr_RxHigh | Intr_RxPacket | Intr_TxLow;
    regs.RxMaxCopy = params()->rx_max_copy;
    regs.TxMaxCopy = params()->tx_max_copy;
    regs.ZeroCopySize = params()->zero_copy_size;
    regs.ZeroCopyMark = params()->zero_copy_threshold;
    regs.VirtualCount = params()->virtual_count;
    regs.RxMaxIntr = params()->rx_max_intr;
    regs.RxFifoSize = params()->rx_fifo_size;
    regs.TxFifoSize = params()->tx_fifo_size;
    regs.RxFifoLow = params()->rx_fifo_low_mark;
    regs.TxFifoLow = params()->tx_fifo_threshold;
    regs.RxFifoHigh = params()->rx_fifo_threshold;
    regs.TxFifoHigh = params()->tx_fifo_high_mark;
    regs.HwAddr = params()->hardware_address;

    // Zero/delay-copy parameter sanity checks.
    if (regs.RxMaxCopy < regs.ZeroCopyMark)
        panic("Must be able to copy at least as many bytes as the threshold");

    if (regs.ZeroCopySize >= regs.ZeroCopyMark)
        panic("The number of bytes to copy must be less than the threshold");

    // Reset rx/tx scheduling state and counters.
    rxList.clear();
    rxBusy.clear();
    rxActive = -1;
    txList.clear();
    rxBusyCount = 0;
    rxDirtyCount = 0;
    rxMappedCount = 0;

    rxState = rxIdle;
    txState = txIdle;

    rxFifo.clear();
    rxFifoPtr = rxFifo.end();
    txFifo.clear();
    rxEmpty = false;
    rxLow = true;
    txFull = false;

    // Rebuild the vnic register file; every vnic starts with no
    // rx fifo packet mapped.
    int size = virtualRegs.size();
    virtualRegs.clear();
    virtualRegs.resize(size);
    for (int i = 0; i < size; ++i)
        virtualRegs[i].rxIndex = rxFifo.end();
}
700
701 void
702 Device::rxDmaDone()
703 {
704 assert(rxState == rxCopy);
705 rxState = rxCopyDone;
706 DPRINTF(EthernetDMA, "end rx dma write paddr=%#x len=%d\n",
707 rxDmaAddr, rxDmaLen);
708 DDUMP(EthernetData, rxDmaData, rxDmaLen);
709
710 // If the transmit state machine has a pending DMA, let it go first
711 if (txState == txBeginCopy)
712 txKick();
713
714 rxKick();
715 }
716
/**
 * Run the receive state machine until it blocks.
 *
 * States: rxFifoBlock picks the next vnic to service (continuing
 * ones on rxBusy first, then new ones on rxList) and maps it to a
 * fifo packet; rxBeginCopy computes the DMA target and starts the
 * copy; rxCopy waits for DMA completion; rxCopyDone updates the
 * vnic's RxDone register, retires or re-queues the packet, and posts
 * interrupts.  Loops via goto next until idle or blocked.
 */
void
Device::rxKick()
{
    VirtualReg *vnic = NULL;

    DPRINTF(EthernetSM, "rxKick: rxState=%s (rxFifo.size=%d)\n",
            RxStateStrings[rxState], rxFifo.size());

    // Throttled: not allowed to run again yet.
    if (rxKickTick > curTick()) {
        DPRINTF(EthernetSM, "rxKick: exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    rxFifo.check();
    if (rxState == rxIdle)
        goto exit;

    if (rxActive == -1) {
        // Only rxFifoBlock may run without a selected vnic.
        if (rxState != rxFifoBlock)
            panic("no active vnic while in state %s", RxStateStrings[rxState]);

        DPRINTF(EthernetSM, "processing rxState=%s\n",
                RxStateStrings[rxState]);
    } else {
        vnic = &virtualRegs[rxActive];
        DPRINTF(EthernetSM,
                "processing rxState=%s for vnic %d (rxunique %d)\n",
                RxStateStrings[rxState], rxActive, vnic->rxUnique);
    }

    switch (rxState) {
      case rxFifoBlock:
        // Debug-only dump of every vnic's mapping status.
        if (DTRACE(EthernetSM)) {
            PacketFifo::iterator end = rxFifo.end();
            int size = virtualRegs.size();
            for (int i = 0; i < size; ++i) {
                VirtualReg *vn = &virtualRegs[i];
                bool busy = Regs::get_RxDone_Busy(vn->RxDone);
                if (vn->rxIndex != end) {
#ifndef NDEBUG
                    bool dirty = vn->rxPacketOffset > 0;
                    const char *status;

                    if (busy && dirty)
                        status = "busy,dirty";
                    else if (busy)
                        status = "busy";
                    else if (dirty)
                        status = "dirty";
                    else
                        status = "mapped";

                    DPRINTF(EthernetSM,
                            "vnic %d %s (rxunique %d), packet %d, slack %d\n",
                            i, status, vn->rxUnique,
                            rxFifo.countPacketsBefore(vn->rxIndex),
                            vn->rxIndex->slack);
#endif
                } else if (busy) {
                    DPRINTF(EthernetSM, "vnic %d unmapped (rxunique %d)\n",
                            i, vn->rxUnique);
                }
            }
        }

        // Prefer vnics continuing an already-mapped packet.
        if (!rxBusy.empty()) {
            rxActive = rxBusy.front();
            rxBusy.pop_front();
            vnic = &virtualRegs[rxActive];

            if (vnic->rxIndex == rxFifo.end())
                panic("continuing vnic without packet\n");

            DPRINTF(EthernetSM,
                    "continue processing for vnic %d (rxunique %d)\n",
                    rxActive, vnic->rxUnique);

            rxState = rxBeginCopy;

            // Record how deep in the fifo this vnic's packet sits.
            int vnic_distance = rxFifo.countPacketsBefore(vnic->rxIndex);
            totalVnicDistance += vnic_distance;
            numVnicDistance += 1;
            if (vnic_distance > _maxVnicDistance) {
                maxVnicDistance = vnic_distance;
                _maxVnicDistance = vnic_distance;
            }

            break;
        }

        // No unmapped packet available yet.
        if (rxFifoPtr == rxFifo.end()) {
            DPRINTF(EthernetSM, "receive waiting for data. Nothing to do.\n");
            goto exit;
        }

        if (rxList.empty())
            panic("Not idle, but nothing to do!");

        assert(!rxFifo.empty());

        rxActive = rxList.front();
        rxList.pop_front();
        vnic = &virtualRegs[rxActive];

        DPRINTF(EthernetSM,
                "processing new packet for vnic %d (rxunique %d)\n",
                rxActive, vnic->rxUnique);

        // Grab a new packet from the fifo.
        vnic->rxIndex = rxFifoPtr++;
        vnic->rxIndex->priv = rxActive;
        vnic->rxPacketOffset = 0;
        vnic->rxPacketBytes = vnic->rxIndex->packet->length;
        assert(vnic->rxPacketBytes);
        rxMappedCount++;

        // Classify the packet (IP/TCP/UDP) and verify checksums,
        // recording results in the vnic's pending RxDone value.
        vnic->rxDoneData = 0;
        /* scope for variables */ {
            IpPtr ip(vnic->rxIndex->packet);
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                vnic->rxDoneData |= Regs::RxDone_IpPacket;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    vnic->rxDoneData |= Regs::RxDone_IpError;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                    vnic->rxDoneData |= Regs::RxDone_TcpPacket;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        vnic->rxDoneData |= Regs::RxDone_TcpError;
                    }
                } else if (udp) {
                    vnic->rxDoneData |= Regs::RxDone_UdpPacket;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        vnic->rxDoneData |= Regs::RxDone_UdpError;
                    }
                }
            }
        }
        rxState = rxBeginCopy;
        break;

      case rxBeginCopy:
        // Can't start while another DMA is in flight or while draining.
        if (dmaPending() || getDrainState() != Drainable::Running)
            goto exit;

        rxDmaAddr = params()->platform->pciToDma(
                Regs::get_RxData_Addr(vnic->RxData));
        rxDmaLen = min<unsigned>(Regs::get_RxData_Len(vnic->RxData),
                                 vnic->rxPacketBytes);

        /*
         * if we're doing zero/delay copy and we're below the fifo
         * threshold, see if we should try to do the zero/defer copy
         */
        if ((Regs::get_Config_ZeroCopy(regs.Config) ||
             Regs::get_Config_DelayCopy(regs.Config)) &&
            !Regs::get_RxData_NoDelay(vnic->RxData) && rxLow) {
            if (rxDmaLen > regs.ZeroCopyMark)
                rxDmaLen = regs.ZeroCopySize;
        }
        rxDmaData = vnic->rxIndex->packet->data + vnic->rxPacketOffset;
        rxState = rxCopy;
        // NOTE(review): address 1 appears to be a sentinel meaning
        // "skip the actual copy" -- confirm against the driver.
        if (rxDmaAddr == 1LL) {
            rxState = rxCopyDone;
            break;
        }

        dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaEvent, rxDmaData);
        break;

      case rxCopy:
        // Waiting for rxDmaDone() to advance us.
        DPRINTF(EthernetSM, "receive machine still copying\n");
        goto exit;

      case rxCopyDone:
        vnic->RxDone = vnic->rxDoneData;
        vnic->RxDone |= Regs::RxDone_Complete;
        rxBusyCount--;

        if (vnic->rxPacketBytes == rxDmaLen) {
            if (vnic->rxPacketOffset)
                rxDirtyCount--;

            // Packet is complete.  Indicate how many bytes were copied
            vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone, rxDmaLen);

            DPRINTF(EthernetSM,
                    "rxKick: packet complete on vnic %d (rxunique %d)\n",
                    rxActive, vnic->rxUnique);
            rxFifo.remove(vnic->rxIndex);
            vnic->rxIndex = rxFifo.end();
            rxMappedCount--;
        } else {
            // Partial copy: keep the packet mapped and tell the driver
            // how many bytes remain (RxDone_More).
            if (!vnic->rxPacketOffset)
                rxDirtyCount++;

            vnic->rxPacketBytes -= rxDmaLen;
            vnic->rxPacketOffset += rxDmaLen;
            vnic->RxDone |= Regs::RxDone_More;
            vnic->RxDone = Regs::set_RxDone_CopyLen(vnic->RxDone,
                                                    vnic->rxPacketBytes);
            DPRINTF(EthernetSM,
                    "rxKick: packet not complete on vnic %d (rxunique %d): "
                    "%d bytes left\n",
                    rxActive, vnic->rxUnique, vnic->rxPacketBytes);
        }

        rxActive = -1;
        rxState = rxBusy.empty() && rxList.empty() ? rxIdle : rxFifoBlock;

        // Arm the RxHigh edge detector and post fifo-level interrupts.
        if (rxFifo.empty()) {
            devIntrPost(Regs::Intr_RxEmpty);
            rxEmpty = true;
        }

        if (rxFifo.size() < regs.RxFifoLow)
            rxLow = true;

        if (rxFifo.size() > regs.RxFifoHigh)
            rxLow = false;

        devIntrPost(Regs::Intr_RxDMA);
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            RxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            RxStateStrings[rxState]);
}
971
972 void
973 Device::txDmaDone()
974 {
975 assert(txState == txCopy);
976 txState = txCopyDone;
977 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
978 txDmaAddr, txDmaLen);
979 DDUMP(EthernetData, txDmaData, txDmaLen);
980
981 // If the receive state machine has a pending DMA, let it go first
982 if (rxState == rxBeginCopy)
983 rxKick();
984
985 txKick();
986 }
987
/**
 * Try to send the packet at the head of the tx fifo out the wire.
 * On success the packet is popped, byte/packet stats are updated,
 * and Intr_TxPacket (plus Intr_TxLow if the fifo dropped below the
 * low-water mark) is posted.  If the link rejects the packet it is
 * left in the fifo for a later retry (see transferDone).
 */
void
Device::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    uint32_t interrupts;
    EthPacketPtr packet = txFifo.front();
    if (!interface->sendPacket(packet)) {
        // Link busy; keep the packet queued.
        DPRINTF(Ethernet, "Packet Transmit: failed txFifo available %d\n",
                txFifo.avail());
        return;
    }

    txFifo.pop();
#if TRACING_ON
    // Trace-only peek at IP/TCP headers of the sent packet.
    if (DTRACE(Ethernet)) {
        IpPtr ip(packet);
        if (ip) {
            DPRINTF(Ethernet, "ID is %d\n", ip->id());
            TcpPtr tcp(ip);
            if (tcp) {
                DPRINTF(Ethernet,
                        "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                        tcp->sport(), tcp->dport(), tcp->seq(),
                        tcp->ack());
            }
        }
    }
#endif

    DDUMP(EthernetData, packet->data, packet->length);
    txBytes += packet->length;
    txPackets++;

    DPRINTF(Ethernet, "Packet Transmit: successful txFifo Available %d\n",
            txFifo.avail());

    interrupts = Regs::Intr_TxPacket;
    if (txFifo.size() < regs.TxFifoLow)
        interrupts |= Regs::Intr_TxLow;
    devIntrPost(interrupts);
}
1033
/**
 * Run the transmit state machine until it blocks.
 *
 * States: txFifoBlock allocates/continues the staging packet and
 * waits for fifo space; txBeginCopy starts the DMA read from guest
 * memory; txCopy waits for DMA completion; txCopyDone finalizes the
 * segment -- either waiting for more segments (TxData_More), or
 * computing checksums, pushing the packet into the tx fifo, and
 * attempting transmission.  Loops via goto next until idle/blocked.
 */
void
Device::txKick()
{
    VirtualReg *vnic;
    DPRINTF(EthernetSM, "txKick: txState=%s (txFifo.size=%d)\n",
            TxStateStrings[txState], txFifo.size());

    // Throttled: not allowed to run again yet.
    if (txKickTick > curTick()) {
        DPRINTF(EthernetSM, "txKick: exiting, can't run till %d\n",
                txKickTick);
        return;
    }

  next:
    if (txState == txIdle)
        goto exit;

    assert(!txList.empty());
    vnic = &virtualRegs[txList.front()];

    switch (txState) {
      case txFifoBlock:
        assert(Regs::get_TxDone_Busy(vnic->TxDone));
        if (!txPacket) {
            // Grab a new packet from the fifo.
            txPacket = new EthPacketData(16384);
            txPacketOffset = 0;
        }

        // Wait until the fifo has room for this segment.
        if (txFifo.avail() - txPacket->length <
            Regs::get_TxData_Len(vnic->TxData)) {
            DPRINTF(EthernetSM, "transmit fifo full.  Nothing to do.\n");
            goto exit;
        }

        txState = txBeginCopy;
        break;

      case txBeginCopy:
        // Can't start while another DMA is in flight or while draining.
        if (dmaPending() || getDrainState() != Drainable::Running)
            goto exit;

        txDmaAddr = params()->platform->pciToDma(
                Regs::get_TxData_Addr(vnic->TxData));
        txDmaLen = Regs::get_TxData_Len(vnic->TxData);
        txDmaData = txPacket->data + txPacketOffset;
        txState = txCopy;

        dmaRead(txDmaAddr, txDmaLen, &txDmaEvent, txDmaData);
        break;

      case txCopy:
        // Waiting for txDmaDone() to advance us.
        DPRINTF(EthernetSM, "transmit machine still copying\n");
        goto exit;

      case txCopyDone:
        vnic->TxDone = txDmaLen | Regs::TxDone_Complete;
        txPacket->length += txDmaLen;
        if ((vnic->TxData & Regs::TxData_More)) {
            // More segments follow; go idle until the driver writes
            // the next TxData descriptor.
            txPacketOffset += txDmaLen;
            txState = txIdle;
            devIntrPost(Regs::Intr_TxDMA);
            break;
        }

        assert(txPacket->length <= txFifo.avail());
        // Optionally fill in TCP/UDP and IP checksums before sending.
        if ((vnic->TxData & Regs::TxData_Checksum)) {
            IpPtr ip(txPacket);
            if (ip) {
                TcpPtr tcp(ip);
                if (tcp) {
                    tcp->sum(0);
                    tcp->sum(cksum(tcp));
                    txTcpChecksums++;
                }

                UdpPtr udp(ip);
                if (udp) {
                    udp->sum(0);
                    udp->sum(cksum(udp));
                    txUdpChecksums++;
                }

                ip->sum(0);
                ip->sum(cksum(ip));
                txIpChecksums++;
            }
        }

        txFifo.push(txPacket);
        // Arm the TxLow edge detector if the fifo just filled up.
        if (txFifo.avail() < regs.TxMaxCopy) {
            devIntrPost(Regs::Intr_TxFull);
            txFull = true;
        }
        txPacket = 0;
        transmit();
        txList.pop_front();
        txState = txList.empty() ? txIdle : txFifoBlock;
        devIntrPost(Regs::Intr_TxDMA);
        break;

      default:
        panic("Invalid txState!");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            TxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            TxStateStrings[txState]);
}
1151
1152 void
1153 Device::transferDone()
1154 {
1155 if (txFifo.empty()) {
1156 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1157 return;
1158 }
1159
1160 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
1161
1162 reschedule(txEvent, clockEdge(Cycles(1)), true);
1163 }
1164
/**
 * Decide whether an incoming packet should be dropped by the receive
 * filter.  Filtering is not actually implemented: if the filter is
 * enabled in the Config register this panics; everything after the
 * panic (kept from an older implementation, mostly inside #if 0) is
 * unreachable.
 *
 * @return true to drop the packet, false to accept it
 */
bool
Device::rxFilter(const EthPacketPtr &packet)
{
    // Filter disabled: accept everything.
    if (!Regs::get_Config_Filter(regs.Config))
        return false;

    panic("receive filter not implemented\n");
    // NOTE(review): dead code below -- guarded out by the panic above.
    bool drop = true;

#if 0
    string type;

    EthHdr *eth = packet->eth();
    if (eth->unicast()) {
        // If we're accepting all unicast addresses
        if (acceptUnicast)
            drop = false;

        // If we make a perfect match
        if (acceptPerfect && params->eaddr == eth.dst())
            drop = false;

        if (acceptArp && eth->type() == ETH_TYPE_ARP)
            drop = false;

    } else if (eth->broadcast()) {
        // if we're accepting broadcasts
        if (acceptBroadcast)
            drop = false;

    } else if (eth->multicast()) {
        // if we're accepting all multicasts
        if (acceptMulticast)
            drop = false;

    }

    if (drop) {
        DPRINTF(Ethernet, "rxFilter drop\n");
        DDUMP(EthernetData, packet->data, packet->length);
    }
#endif
    return drop;
}
1209
/**
 * Accept a packet arriving from the wire.
 *
 * @param packet the packet delivered by the ethernet link
 * @return true if the packet was consumed (including deliberate
 *         drops), false if the receive fifo was full — presumably a
 *         backpressure signal to the link; verify against the caller.
 */
bool
Device::recvPacket(EthPacketPtr packet)
{
    // Stats are counted before the enable/filter checks, so dropped
    // packets are included in rxBytes/rxPackets.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifo Available is %d\n",
            rxFifo.avail());

    // Receive disabled: drop, but report the packet as consumed.
    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    // Filtered out: also consumed.
    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    // Warn the guest (via interrupt) that the fifo is filling up.
    if (rxFifo.size() >= regs.RxFifoHigh)
        devIntrPost(Regs::Intr_RxHigh);

    // Fifo full: refuse the packet so the link can retry/stall.
    if (!rxFifo.push(packet)) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        return false;
    }

    // If we were at the last element, back up one to go to the new
    // last element of the list.
    if (rxFifoPtr == rxFifo.end())
        --rxFifoPtr;

    // Tell the guest a packet arrived and run the rx state machine.
    devIntrPost(Regs::Intr_RxPacket);
    rxKick();
    return true;
}
1247
1248 void
1249 Device::drainResume()
1250 {
1251 Drainable::drainResume();
1252
1253 // During drain we could have left the state machines in a waiting state and
1254 // they wouldn't get out until some other event occured to kick them.
1255 // This way they'll get out immediately
1256 txKick();
1257 rxKick();
1258 }
1259
1260 //=====================================================================
1261 //
1262 //
/**
 * Checkpoint the state shared by all sinic device variants: the
 * rx/tx enables, CPU interrupt enable, and any pending interrupt.
 */
void
Base::serialize(std::ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(cpuIntrEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // Events can't be serialized directly; store the scheduled tick
    // (0 means "no interrupt event pending") so unserialize can
    // recreate and reschedule it.
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);
}
1283
/**
 * Restore the state saved by Base::serialize, recreating the pending
 * interrupt event if one was scheduled at checkpoint time.
 */
void
Base::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(cpuIntrEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    // A nonzero tick means an interrupt event was in flight at
    // checkpoint time; rebuild it and put it back on the event queue.
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        schedule(intrEvent, intrEventTick);
    }
}
1306
/**
 * Checkpoint the full device state: OS-visible registers, per-vnic
 * virtual registers, the rx/tx lists and fifos, and both state
 * machines.  Fifo iterators are encoded as packet counts from
 * rxFifo.begin() (with -1 / a false flag meaning "end") since raw
 * iterators can't be serialized.
 */
void
Device::serialize(std::ostream &os)
{
    int count;

    // Serialize the PciDev base class
    Base::serialize(os);

    // In-flight DMA state (addresses, partial copies) is not
    // serialized below, so refuse to checkpoint mid-copy.
    if (rxState == rxCopy)
        panic("can't serialize with an in flight dma request rxState=%s",
              RxStateStrings[rxState]);

    if (txState == txCopy)
        panic("can't serialize with an in flight dma request txState=%s",
              TxStateStrings[txState]);

    /*
     * Serialize the device registers that could be modified by the OS.
     */
    SERIALIZE_SCALAR(regs.Config);
    SERIALIZE_SCALAR(regs.IntrStatus);
    SERIALIZE_SCALAR(regs.IntrMask);
    SERIALIZE_SCALAR(regs.RxData);
    SERIALIZE_SCALAR(regs.TxData);

    /*
     * Serialize the virtual nic state
     */
    int virtualRegsSize = virtualRegs.size();
    SERIALIZE_SCALAR(virtualRegsSize);
    for (int i = 0; i < virtualRegsSize; ++i) {
        VirtualReg *vnic = &virtualRegs[i];

        std::string reg = csprintf("vnic%d", i);
        paramOut(os, reg + ".RxData", vnic->RxData);
        paramOut(os, reg + ".RxDone", vnic->RxDone);
        paramOut(os, reg + ".TxData", vnic->TxData);
        paramOut(os, reg + ".TxDone", vnic->TxDone);

        // rxIndex == end() means this vnic has no packet in flight.
        bool rxPacketExists = vnic->rxIndex != rxFifo.end();
        paramOut(os, reg + ".rxPacketExists", rxPacketExists);
        if (rxPacketExists) {
            // Encode the iterator as its distance from rxFifo.begin().
            int rxPacket = 0;
            PacketFifo::iterator i = rxFifo.begin();
            while (i != vnic->rxIndex) {
                assert(i != rxFifo.end());
                ++i;
                ++rxPacket;
            }

            paramOut(os, reg + ".rxPacket", rxPacket);
            paramOut(os, reg + ".rxPacketOffset", vnic->rxPacketOffset);
            paramOut(os, reg + ".rxPacketBytes", vnic->rxPacketBytes);
        }
        paramOut(os, reg + ".rxDoneData", vnic->rxDoneData);
    }

    // Same iterator-as-count encoding for the device-level fifo
    // pointer; the local deliberately shadows the member (hence the
    // explicit this->), and -1 encodes rxFifo.end().
    int rxFifoPtr = -1;
    if (this->rxFifoPtr != rxFifo.end())
        rxFifoPtr = rxFifo.countPacketsBefore(this->rxFifoPtr);
    SERIALIZE_SCALAR(rxFifoPtr);

    SERIALIZE_SCALAR(rxActive);
    SERIALIZE_SCALAR(rxBusyCount);
    SERIALIZE_SCALAR(rxDirtyCount);
    SERIALIZE_SCALAR(rxMappedCount);

    // The vnic index lists are written element-by-element with an
    // explicit size so unserialize can rebuild them in order.
    VirtualList::iterator i, end;
    for (count = 0, i = rxList.begin(), end = rxList.end(); i != end; ++i)
        paramOut(os, csprintf("rxList%d", count++), *i);
    int rxListSize = count;
    SERIALIZE_SCALAR(rxListSize);

    for (count = 0, i = rxBusy.begin(), end = rxBusy.end(); i != end; ++i)
        paramOut(os, csprintf("rxBusy%d", count++), *i);
    int rxBusySize = count;
    SERIALIZE_SCALAR(rxBusySize);

    for (count = 0, i = txList.begin(), end = txList.end(); i != end; ++i)
        paramOut(os, csprintf("txList%d", count++), *i);
    int txListSize = count;
    SERIALIZE_SCALAR(txListSize);

    /*
     * Serialize rx state machine
     */
    // Enum stored as int; local shadows the member on purpose.
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEmpty);
    SERIALIZE_SCALAR(rxLow);
    rxFifo.serialize("rxFifo", os);

    /*
     * Serialize tx state machine
     */
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txFull);
    txFifo.serialize("txFifo", os);
    // The partially-built tx packet (if any) and the progress
    // counters needed to resume assembling it.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->serialize("txPacket", os);
        SERIALIZE_SCALAR(txPacketOffset);
        SERIALIZE_SCALAR(txPacketBytes);
    }

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick() (0 = not scheduled) so it can be
    // rescheduled as an offset after restore.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick() : 0;
    SERIALIZE_SCALAR(transmitTick);
}
1421
/**
 * Restore the device state saved by Device::serialize.  Iterators
 * that were encoded as packet counts are rebuilt by walking the
 * restored rxFifo, which is why the fifo must be unserialized before
 * the vnic rxIndex iterators and the device rxFifoPtr.
 */
void
Device::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    Base::unserialize(cp, section);

    /*
     * Unserialize the device registers that may have been written by the OS.
     */
    UNSERIALIZE_SCALAR(regs.Config);
    UNSERIALIZE_SCALAR(regs.IntrStatus);
    UNSERIALIZE_SCALAR(regs.IntrMask);
    UNSERIALIZE_SCALAR(regs.RxData);
    UNSERIALIZE_SCALAR(regs.TxData);

    UNSERIALIZE_SCALAR(rxActive);
    UNSERIALIZE_SCALAR(rxBusyCount);
    UNSERIALIZE_SCALAR(rxDirtyCount);
    UNSERIALIZE_SCALAR(rxMappedCount);

    // Rebuild the vnic index lists element-by-element, in the order
    // they were written.
    int rxListSize;
    UNSERIALIZE_SCALAR(rxListSize);
    rxList.clear();
    for (int i = 0; i < rxListSize; ++i) {
        int value;
        paramIn(cp, section, csprintf("rxList%d", i), value);
        rxList.push_back(value);
    }

    int rxBusySize;
    UNSERIALIZE_SCALAR(rxBusySize);
    rxBusy.clear();
    for (int i = 0; i < rxBusySize; ++i) {
        int value;
        paramIn(cp, section, csprintf("rxBusy%d", i), value);
        rxBusy.push_back(value);
    }

    int txListSize;
    UNSERIALIZE_SCALAR(txListSize);
    txList.clear();
    for (int i = 0; i < txListSize; ++i) {
        int value;
        paramIn(cp, section, csprintf("txList%d", i), value);
        txList.push_back(value);
    }

    /*
     * Unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    UNSERIALIZE_SCALAR(rxEmpty);
    UNSERIALIZE_SCALAR(rxLow);
    this->rxState = (RxState) rxState;
    rxFifo.unserialize("rxFifo", cp, section);

    // Rebuild the fifo pointer from its packet-count encoding;
    // -1 was the sentinel for rxFifo.end().
    int rxFifoPtr;
    UNSERIALIZE_SCALAR(rxFifoPtr);
    if (rxFifoPtr >= 0) {
        this->rxFifoPtr = rxFifo.begin();
        for (int i = 0; i < rxFifoPtr; ++i)
            ++this->rxFifoPtr;
    } else {
        this->rxFifoPtr = rxFifo.end();
    }

    /*
     * Unserialize tx state machine
     */
    int txState;
    UNSERIALIZE_SCALAR(txState);
    UNSERIALIZE_SCALAR(txFull);
    this->txState = (TxState) txState;
    txFifo.unserialize("txFifo", cp, section);
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    txPacket = 0;
    if (txPacketExists) {
        // 16384 is presumably the maximum packet buffer size this
        // device assembles — confirm against the tx packet
        // construction elsewhere in this file.
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        UNSERIALIZE_SCALAR(txPacketOffset);
        UNSERIALIZE_SCALAR(txPacketBytes);
    }

    /*
     * unserialize the virtual nic registers/state
     *
     * this must be done after the unserialization of the rxFifo
     * because the packet iterators depend on the fifo being populated
     */
    int virtualRegsSize;
    UNSERIALIZE_SCALAR(virtualRegsSize);
    virtualRegs.clear();
    virtualRegs.resize(virtualRegsSize);
    for (int i = 0; i < virtualRegsSize; ++i) {
        VirtualReg *vnic = &virtualRegs[i];
        std::string reg = csprintf("vnic%d", i);

        paramIn(cp, section, reg + ".RxData", vnic->RxData);
        paramIn(cp, section, reg + ".RxDone", vnic->RxDone);
        paramIn(cp, section, reg + ".TxData", vnic->TxData);
        paramIn(cp, section, reg + ".TxDone", vnic->TxDone);

        // Unique ids are not checkpointed; hand out fresh ones.
        vnic->rxUnique = rxUnique++;
        vnic->txUnique = txUnique++;

        bool rxPacketExists;
        paramIn(cp, section, reg + ".rxPacketExists", rxPacketExists);
        if (rxPacketExists) {
            // Walk the restored fifo to turn the saved packet count
            // back into an iterator.
            int rxPacket;
            paramIn(cp, section, reg + ".rxPacket", rxPacket);
            vnic->rxIndex = rxFifo.begin();
            while (rxPacket--)
                ++vnic->rxIndex;

            paramIn(cp, section, reg + ".rxPacketOffset",
                    vnic->rxPacketOffset);
            paramIn(cp, section, reg + ".rxPacketBytes", vnic->rxPacketBytes);
        } else {
            vnic->rxIndex = rxFifo.end();
        }
        paramIn(cp, section, reg + ".rxDoneData", vnic->rxDoneData);
    }

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was saved as an offset from the checkpoint's
    // curTick(), so add it to the current tick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        schedule(txEvent, curTick() + transmitTick);

    pioPort.sendRangeChange();

}
1558
1559 } // namespace Sinic
1560
1561 Sinic::Device *
1562 SinicParams::create()
1563 {
1564 return new Sinic::Device(this);
1565 }