1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <cstdio>
30 #include <deque>
31 #include <string>
32
33 #include "base/inet.hh"
34 #include "cpu/exec_context.hh"
35 #include "cpu/intr_control.hh"
36 #include "dev/etherlink.hh"
37 #include "dev/sinic.hh"
38 #include "dev/pciconfigall.hh"
39 #include "mem/bus/bus.hh"
40 #include "mem/bus/dma_interface.hh"
41 #include "mem/bus/pio_interface.hh"
42 #include "mem/bus/pio_interface_impl.hh"
43 #include "mem/functional/memory_control.hh"
44 #include "mem/functional/physical.hh"
45 #include "sim/builder.hh"
46 #include "sim/debug.hh"
47 #include "sim/eventq.hh"
48 #include "sim/host.hh"
49 #include "sim/stats.hh"
50 #include "arch/vtophys.hh"
51
52 using namespace Net;
53 using namespace TheISA;
54
55 namespace Sinic {
56
57 const char *RxStateStrings[] =
58 {
59 "rxIdle",
60 "rxFifoBlock",
61 "rxBeginCopy",
62 "rxCopy",
63 "rxCopyDone"
64 };
65
66 const char *TxStateStrings[] =
67 {
68 "txIdle",
69 "txFifoBlock",
70 "txBeginCopy",
71 "txCopy",
72 "txCopyDone"
73 };
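/*
 * The string tables above are indexed by the rxState/txState members so
 * that the DPRINTFs and panics below can print symbolic state names
 * instead of raw enum values.
 */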
74
75
76 ///////////////////////////////////////////////////////////////////////
77 //
78 // Sinic PCI Device
79 //
80 Base::Base(Params *p)
81 : PciDev(p), rxEnable(false), txEnable(false), clock(p->clock),
82 intrDelay(p->intr_delay), intrTick(0), cpuIntrEnable(false),
83 cpuPendingIntr(false), intrEvent(0), interface(NULL)
84 {
85 }
86
87 Device::Device(Params *p)
88 : Base(p), plat(p->plat), physmem(p->physmem),
89 rxFifo(p->rx_fifo_size), txFifo(p->tx_fifo_size),
90 rxKickTick(0), txKickTick(0),
91 txEvent(this), rxDmaEvent(this), txDmaEvent(this),
92 dmaReadDelay(p->dma_read_delay), dmaReadFactor(p->dma_read_factor),
93 dmaWriteDelay(p->dma_write_delay), dmaWriteFactor(p->dma_write_factor)
94 {
95 reset();
96
97 if (p->pio_bus) {
98 pioInterface = newPioInterface(p->name + ".pio", p->hier, p->pio_bus,
99 this, &Device::cacheAccess);
100 pioLatency = p->pio_latency * p->pio_bus->clockRate;
101 }
102
103 if (p->header_bus) {
104 if (p->payload_bus)
105 dmaInterface = new DMAInterface<Bus>(p->name + ".dma",
106 p->header_bus,
107 p->payload_bus, 1,
108 p->dma_no_allocate);
109 else
110 dmaInterface = new DMAInterface<Bus>(p->name + ".dma",
111 p->header_bus,
112 p->header_bus, 1,
113 p->dma_no_allocate);
114 } else if (p->payload_bus)
115 panic("must define a header bus if defining a payload bus");
116 }
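/*
 * DMA wiring note: when both a header bus and a payload bus are given,
 * the DMA interface splits header traffic from payload traffic; with
 * only a header bus, both flow through the same bus.  A payload bus
 * without a header bus is rejected above.
 */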
117
118 Device::~Device()
119 {}
120
121 void
122 Device::regStats()
123 {
124 rxBytes
125 .name(name() + ".rxBytes")
126 .desc("Bytes Received")
127 .prereq(rxBytes)
128 ;
129
130 rxBandwidth
131 .name(name() + ".rxBandwidth")
132 .desc("Receive Bandwidth (bits/s)")
133 .precision(0)
134 .prereq(rxBytes)
135 ;
136
137 rxPackets
138 .name(name() + ".rxPackets")
139 .desc("Number of Packets Received")
140 .prereq(rxBytes)
141 ;
142
143 rxPacketRate
144 .name(name() + ".rxPPS")
145 .desc("Packet Reception Rate (packets/s)")
146 .precision(0)
147 .prereq(rxBytes)
148 ;
149
150 rxIpPackets
151 .name(name() + ".rxIpPackets")
152 .desc("Number of IP Packets Received")
153 .prereq(rxBytes)
154 ;
155
156 rxTcpPackets
157 .name(name() + ".rxTcpPackets")
158 .desc("Number of TCP Packets Received")
159 .prereq(rxBytes)
160 ;
161
162 rxUdpPackets
163 .name(name() + ".rxUdpPackets")
164 .desc("Number of UDP Packets Received")
165 .prereq(rxBytes)
166 ;
167
168 rxIpChecksums
169 .name(name() + ".rxIpChecksums")
170 .desc("Number of rx IP Checksums done by device")
171 .precision(0)
172 .prereq(rxBytes)
173 ;
174
175 rxTcpChecksums
176 .name(name() + ".rxTcpChecksums")
177 .desc("Number of rx TCP Checksums done by device")
178 .precision(0)
179 .prereq(rxBytes)
180 ;
181
182 rxUdpChecksums
183 .name(name() + ".rxUdpChecksums")
184 .desc("Number of rx UDP Checksums done by device")
185 .precision(0)
186 .prereq(rxBytes)
187 ;
188
189 totBandwidth
190 .name(name() + ".totBandwidth")
191 .desc("Total Bandwidth (bits/s)")
192 .precision(0)
193 .prereq(totBytes)
194 ;
195
196 totPackets
197 .name(name() + ".totPackets")
198 .desc("Total Packets")
199 .precision(0)
200 .prereq(totBytes)
201 ;
202
203 totBytes
204 .name(name() + ".totBytes")
205 .desc("Total Bytes")
206 .precision(0)
207 .prereq(totBytes)
208 ;
209
210 totPacketRate
211 .name(name() + ".totPPS")
212 .desc("Total Transmission Rate (packets/s)")
213 .precision(0)
214 .prereq(totBytes)
215 ;
216
217 txBytes
218 .name(name() + ".txBytes")
219 .desc("Bytes Transmitted")
220 .prereq(txBytes)
221 ;
222
223 txBandwidth
224 .name(name() + ".txBandwidth")
225 .desc("Transmit Bandwidth (bits/s)")
226 .precision(0)
227 .prereq(txBytes)
228 ;
229
230 txPackets
231 .name(name() + ".txPackets")
232 .desc("Number of Packets Transmitted")
233 .prereq(txBytes)
234 ;
235
236 txPacketRate
237 .name(name() + ".txPPS")
238 .desc("Packet Transmission Rate (packets/s)")
239 .precision(0)
240 .prereq(txBytes)
241 ;
242
243 txIpPackets
244 .name(name() + ".txIpPackets")
245 .desc("Number of IP Packets Transmitted")
246 .prereq(txBytes)
247 ;
248
249 txTcpPackets
250 .name(name() + ".txTcpPackets")
251 .desc("Number of TCP Packets Transmitted")
252 .prereq(txBytes)
253 ;
254
255 txUdpPackets
256 .name(name() + ".txUdpPackets")
257 .desc("Number of UDP Packets Transmitted")
258 .prereq(txBytes)
259 ;
260
261 txIpChecksums
262 .name(name() + ".txIpChecksums")
263 .desc("Number of tx IP Checksums done by device")
264 .precision(0)
265 .prereq(txBytes)
266 ;
267
268 txTcpChecksums
269 .name(name() + ".txTcpChecksums")
270 .desc("Number of tx TCP Checksums done by device")
271 .precision(0)
272 .prereq(txBytes)
273 ;
274
275 txUdpChecksums
276 .name(name() + ".txUdpChecksums")
277 .desc("Number of tx UDP Checksums done by device")
278 .precision(0)
279 .prereq(txBytes)
280 ;
281
282 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
283 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
284 totBandwidth = txBandwidth + rxBandwidth;
285 totBytes = txBytes + rxBytes;
286 totPackets = txPackets + rxPackets;
287 txPacketRate = txPackets / simSeconds;
288 rxPacketRate = rxPackets / simSeconds;
289 }
290
291 /**
292 * Write to the PCI general configuration registers; writes to BAR0 also re-register the PIO address range
293 */
294 void
295 Device::writeConfig(int offset, int size, const uint8_t *data)
296 {
297 switch (offset) {
298 case PCI0_BASE_ADDR0:
299 // Need to catch writes to BARs to update the PIO interface
300 PciDev::writeConfig(offset, size, data);
301 if (BARAddrs[0] != 0) {
302 if (pioInterface)
303 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
304
305 BARAddrs[0] &= EV5::PAddrUncachedMask;
306 }
307 break;
308
309 default:
310 PciDev::writeConfig(offset, size, data);
311 }
312 }
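/*
 * Only BAR0 writes need special handling above: once the OS programs
 * the BAR, the new range is registered with the PIO interface and the
 * stored base address is masked with EV5::PAddrUncachedMask
 * (presumably to strip the Alpha uncached-space bits before later
 * address decoding).
 */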
313
314 void
315 Device::prepareIO(int cpu, int index)
316 {
317 int size = virtualRegs.size();
318 if (index < size)
319 return;
320
321 virtualRegs.resize(index + 1);
322 for (int i = size; i <= index; ++i)
323 virtualRegs[i].rxPacket = rxFifo.end();
324 }
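/*
 * prepareIO() grows the virtual register file on demand: the first
 * access to VNIC 'index' allocates every entry up to that index and
 * marks the new entries' rxPacket iterators as "no packet attached"
 * (rxFifo.end()).
 */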
325
326 void
327 Device::prepareRead(int cpu, int index)
328 {
329 using namespace Regs;
330 prepareIO(cpu, index);
331
332 VirtualReg &vnic = virtualRegs[index];
333
334 // update rx registers
335 uint64_t rxdone = vnic.RxDone;
336 rxdone = set_RxDone_Packets(rxdone, rxFifo.packets());
337 regs.RxData = vnic.RxData;
338 regs.RxDone = rxdone;
339 regs.RxWait = rxdone;
340
341 // update tx registers
342 uint64_t txdone = vnic.TxDone;
343 txdone = set_TxDone_Packets(txdone, txFifo.packets());
344 txdone = set_TxDone_Full(txdone, txFifo.avail() < regs.TxMaxCopy);
345 txdone = set_TxDone_Low(txdone, txFifo.size() < regs.TxFifoMark);
346 regs.TxData = vnic.TxData;
347 regs.TxDone = txdone;
348 regs.TxWait = txdone;
349 }
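/*
 * prepareRead() folds the per-VNIC RxDone/TxDone shadow state plus the
 * current fifo occupancy (packet counts, tx full/low marks) into the
 * global regs structure, so the register read helpers used by
 * readBar0() and iprRead() return an up-to-date snapshot for the VNIC
 * being accessed.
 */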
350
351 void
352 Device::prepareWrite(int cpu, int index)
353 {
354 prepareIO(cpu, index);
355 }
356
357 /**
358 * I/O read of device register
359 */
360 Fault
361 Device::read(MemReqPtr &req, uint8_t *data)
362 {
363 assert(config.command & PCI_CMD_MSE);
364 Fault fault = readBar(req, data);
365
366 if (fault->isMachineCheckFault()) {
367 panic("address does not map to a BAR pa=%#x va=%#x size=%d",
368 req->paddr, req->vaddr, req->size);
369
370 return genMachineCheckFault();
371 }
372
373 return fault;
374 }
375
376 Fault
377 Device::readBar0(MemReqPtr &req, Addr daddr, uint8_t *data)
378 {
379 int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff;
380 Addr index = daddr >> Regs::VirtualShift;
381 Addr raddr = daddr & Regs::VirtualMask;
382
383 if (!regValid(raddr))
384 panic("invalid register: cpu=%d, da=%#x pa=%#x va=%#x size=%d",
385 cpu, daddr, req->paddr, req->vaddr, req->size);
386
387 const Regs::Info &info = regInfo(raddr);
388 if (!info.read)
389 panic("reading %s (write only): cpu=%d da=%#x pa=%#x va=%#x size=%d",
390 info.name, cpu, daddr, req->paddr, req->vaddr, req->size);
391
392 if (req->size != info.size)
393 panic("invalid size for reg %s: cpu=%d da=%#x pa=%#x va=%#x size=%d",
394 info.name, cpu, daddr, req->paddr, req->vaddr, req->size);
395
396 prepareRead(cpu, index);
397
398 uint64_t value = 0;
399 if (req->size == 4) {
400 uint32_t &reg = *(uint32_t *)data;
401 reg = regData32(raddr);
402 value = reg;
403 }
404
405 if (req->size == 8) {
406 uint64_t &reg = *(uint64_t *)data;
407 reg = regData64(raddr);
408 value = reg;
409 }
410
411 DPRINTF(EthernetPIO,
412 "read %s cpu=%d da=%#x pa=%#x va=%#x size=%d val=%#x\n",
413 info.name, cpu, daddr, req->paddr, req->vaddr, req->size, value);
414
415 // reading the interrupt status register has the side effect of
416 // clearing it
417 if (raddr == Regs::IntrStatus)
418 devIntrClear();
419
420 return NoFault;
421 }
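/*
 * BAR0 layout, as decoded above: each virtual NIC occupies a
 * (1 << Regs::VirtualShift)-byte window, so an access at
 * daddr = (2 << Regs::VirtualShift) | Regs::RxDone, for example, reads
 * VNIC 2's RxDone register.  The accessing CPU number is recovered from
 * bits 15:8 of IPR_PALtemp16, which the PAL code apparently keeps
 * loaded with the CPU id.
 */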
422
423 /**
424 * IPR read of device register
425 */
426 Fault
427 Device::iprRead(Addr daddr, int cpu, uint64_t &result)
428 {
429 if (!regValid(daddr))
430 panic("invalid address: da=%#x", daddr);
431
432 const Regs::Info &info = regInfo(daddr);
433 if (!info.read)
434 panic("reading %s (write only): cpu=%d da=%#x", info.name, cpu, daddr);
435
436 DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x\n",
437 info.name, cpu, daddr);
438
439 prepareRead(cpu, 0);
440
441 if (info.size == 4)
442 result = regData32(daddr);
443
444 if (info.size == 8)
445 result = regData64(daddr);
446
447 DPRINTF(EthernetPIO, "IPR read %s: cpu=%d da=%#x val=%#x\n",
448 info.name, cpu, daddr, result);
449
450 return NoFault;
451 }
452
453 /**
454 * I/O write of device register
455 */
456 Fault
457 Device::write(MemReqPtr &req, const uint8_t *data)
458 {
459 assert(config.command & PCI_CMD_MSE);
460 Fault fault = writeBar(req, data);
461
462 if (fault->isMachineCheckFault()) {
463 panic("address does not map to a BAR pa=%#x va=%#x size=%d",
464 req->paddr, req->vaddr, req->size);
465
466 return genMachineCheckFault();
467 }
468
469 return fault;
470 }
471
472 Fault
473 Device::writeBar0(MemReqPtr &req, Addr daddr, const uint8_t *data)
474 {
475 int cpu = (req->xc->regs.ipr[TheISA::IPR_PALtemp16] >> 8) & 0xff;
476 Addr index = daddr >> Regs::VirtualShift;
477 Addr raddr = daddr & Regs::VirtualMask;
478
479 if (!regValid(raddr))
480 panic("invalid address: cpu=%d da=%#x pa=%#x va=%#x size=%d",
481 cpu, daddr, req->paddr, req->vaddr, req->size);
482
483 const Regs::Info &info = regInfo(raddr);
484 if (!info.write)
485 panic("writing %s (read only): cpu=%d da=%#x",
486 info.name, cpu, daddr);
487
488 if (req->size != info.size)
489 panic("invalid size for %s: cpu=%d da=%#x pa=%#x va=%#x size=%d",
490 info.name, cpu, daddr, req->paddr, req->vaddr, req->size);
491
492 //These are commented out because when the DPRINTF below isn't used,
493 //these values aren't used and gcc issues a warning. With -Werror,
494 //this prevents compilation.
495 //uint32_t reg32 = *(uint32_t *)data;
496 //uint64_t reg64 = *(uint64_t *)data;
497 DPRINTF(EthernetPIO,
498 "write %s: cpu=%d val=%#x da=%#x pa=%#x va=%#x size=%d\n",
499 info.name, cpu, info.size == 4 ?
500 (*(uint32_t *)data) :
501 (*(uint64_t *)data),
502 daddr, req->paddr, req->vaddr, req->size);
503
504 prepareWrite(cpu, index);
505
506 regWrite(daddr, cpu, data);
507
508 return NoFault;
509 }
510
511 void
512 Device::regWrite(Addr daddr, int cpu, const uint8_t *data)
513 {
514 Addr index = daddr >> Regs::VirtualShift;
515 Addr raddr = daddr & Regs::VirtualMask;
516
517 uint32_t reg32 = *(uint32_t *)data;
518 uint64_t reg64 = *(uint64_t *)data;
519 VirtualReg &vnic = virtualRegs[index];
520
521 switch (raddr) {
522 case Regs::Config:
523 changeConfig(reg32);
524 break;
525
526 case Regs::Command:
527 command(reg32);
528 break;
529
530 case Regs::IntrStatus:
531 devIntrClear(regs.IntrStatus & reg32);
532 break;
533
534 case Regs::IntrMask:
535 devIntrChangeMask(reg32);
536 break;
537
538 case Regs::RxData:
539 if (Regs::get_RxDone_Busy(vnic.RxDone))
540 panic("receive machine busy with another request! rxState=%s",
541 RxStateStrings[rxState]);
542
543 vnic.RxDone = Regs::RxDone_Busy;
544 vnic.RxData = reg64;
545 rxList.push_back(index);
546 if (rxEnable && rxState == rxIdle) {
547 rxState = rxFifoBlock;
548 rxKick();
549 }
550 break;
551
552 case Regs::TxData:
553 if (Regs::get_TxDone_Busy(vnic.TxDone))
554 panic("transmit machine busy with another request! txState=%s",
555 TxStateStrings[txState]);
556
557 vnic.TxDone = Regs::TxDone_Busy;
558 vnic.TxData = reg64;
559 if (txList.empty() || txList.front() != index)
560 txList.push_back(index);
561 if (txEnable && txState == txIdle && txList.front() == index) {
562 txState = txFifoBlock;
563 txKick();
564 }
565 break;
566 }
567 }
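/*
 * Rough driver-side sequence implied by the cases above (a sketch, not
 * the actual guest driver): to post a receive buffer the guest writes
 * RxData with a DMA address/length packed via the Regs accessors, then
 * waits for RxDone_Complete (polling RxDone or taking the Intr_RxDMA
 * interrupt); transmits work the same way through TxData/TxDone.
 * Writing RxData or TxData while the corresponding Busy bit is still
 * set is a programming error and panics the simulator.
 */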
568
569 void
570 Device::devIntrPost(uint32_t interrupts)
571 {
572 if ((interrupts & Regs::Intr_Res))
573 panic("Cannot set a reserved interrupt");
574
575 regs.IntrStatus |= interrupts;
576
577 DPRINTF(EthernetIntr,
578 "interrupt written to intStatus: intr=%#x status=%#x mask=%#x\n",
579 interrupts, regs.IntrStatus, regs.IntrMask);
580
581 interrupts = regs.IntrStatus & regs.IntrMask;
582
583 // Intr_RxHigh is special, we only signal it if we've emptied the fifo
584 // and then filled it above the high watermark
585 if (rxEmpty)
586 rxEmpty = false;
587 else
588 interrupts &= ~Regs::Intr_RxHigh;
589
590 // Intr_TxLow is special, we only signal it if we've filled up the fifo
591 // and then dropped below the low watermark
592 if (txFull)
593 txFull = false;
594 else
595 interrupts &= ~Regs::Intr_TxLow;
596
597 if (interrupts) {
598 Tick when = curTick;
599 if ((interrupts & Regs::Intr_NoDelay) == 0)
600 when += intrDelay;
601 cpuIntrPost(when);
602 }
603 }
604
605 void
606 Device::devIntrClear(uint32_t interrupts)
607 {
608 if ((interrupts & Regs::Intr_Res))
609 panic("Cannot clear a reserved interrupt");
610
611 regs.IntrStatus &= ~interrupts;
612
613 DPRINTF(EthernetIntr,
614 "interrupt cleared from intStatus: intr=%x status=%x mask=%x\n",
615 interrupts, regs.IntrStatus, regs.IntrMask);
616
617 if (!(regs.IntrStatus & regs.IntrMask))
618 cpuIntrClear();
619 }
620
621 void
622 Device::devIntrChangeMask(uint32_t newmask)
623 {
624 if (regs.IntrMask == newmask)
625 return;
626
627 regs.IntrMask = newmask;
628
629 DPRINTF(EthernetIntr,
630 "interrupt mask changed: intStatus=%x intMask=%x masked=%x\n",
631 regs.IntrStatus, regs.IntrMask, regs.IntrStatus & regs.IntrMask);
632
633 if (regs.IntrStatus & regs.IntrMask)
634 cpuIntrPost(curTick);
635 else
636 cpuIntrClear();
637 }
638
639 void
640 Base::cpuIntrPost(Tick when)
641 {
642 // If the interrupt you want to post is later than an interrupt
643 // already scheduled, just let it post in the coming one and don't
644 // schedule another.
645 // HOWEVER, must be sure that the scheduled intrTick is in the
646 // future (this was formerly the source of a bug)
647 /**
648 * @todo this warning should be removed and the intrTick code should
649 * be fixed.
650 */
651 assert(when >= curTick);
652 assert(intrTick >= curTick || intrTick == 0);
653 if (!cpuIntrEnable) {
654 DPRINTF(EthernetIntr, "interrupts not enabled.\n");
656 return;
657 }
658
659 if (when > intrTick && intrTick != 0) {
660 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
661 intrTick);
662 return;
663 }
664
665 intrTick = when;
666 if (intrTick < curTick) {
667 debug_break();
668 intrTick = curTick;
669 }
670
671 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
672 intrTick);
673
674 if (intrEvent)
675 intrEvent->squash();
676 intrEvent = new IntrEvent(this, true);
677 intrEvent->schedule(intrTick);
678 }
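/*
 * cpuIntrPost() coalesces interrupt delivery: if an IntrEvent is
 * already scheduled at or before 'when', the new request simply rides
 * along with it; otherwise any later pending event is squashed and a
 * new one is scheduled for 'when'.  intrTick == 0 means no event is
 * outstanding.
 */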
679
680 void
681 Base::cpuInterrupt()
682 {
683 assert(intrTick == curTick);
684
685 // Whether or not there's a pending interrupt, we don't care about
686 // it anymore
687 intrEvent = 0;
688 intrTick = 0;
689
690 // Don't send an interrupt if there's already one
691 if (cpuPendingIntr) {
692 DPRINTF(EthernetIntr,
693 "would send an interrupt now, but there's already pending\n");
694 } else {
695 // Send interrupt
696 cpuPendingIntr = true;
697
698 DPRINTF(EthernetIntr, "posting interrupt\n");
699 intrPost();
700 }
701 }
702
703 void
704 Base::cpuIntrClear()
705 {
706 if (!cpuPendingIntr)
707 return;
708
709 if (intrEvent) {
710 intrEvent->squash();
711 intrEvent = 0;
712 }
713
714 intrTick = 0;
715
716 cpuPendingIntr = false;
717
718 DPRINTF(EthernetIntr, "clearing cchip interrupt\n");
719 intrClear();
720 }
721
722 bool
723 Base::cpuIntrPending() const
724 { return cpuPendingIntr; }
725
726 void
727 Device::changeConfig(uint32_t newconf)
728 {
729 uint32_t changed = regs.Config ^ newconf;
730 if (!changed)
731 return;
732
733 regs.Config = newconf;
734
735 if ((changed & Regs::Config_IntEn)) {
736 cpuIntrEnable = regs.Config & Regs::Config_IntEn;
737 if (cpuIntrEnable) {
738 if (regs.IntrStatus & regs.IntrMask)
739 cpuIntrPost(curTick);
740 } else {
741 cpuIntrClear();
742 }
743 }
744
745 if ((changed & Regs::Config_TxEn)) {
746 txEnable = regs.Config & Regs::Config_TxEn;
747 if (txEnable)
748 txKick();
749 }
750
751 if ((changed & Regs::Config_RxEn)) {
752 rxEnable = regs.Config & Regs::Config_RxEn;
753 if (rxEnable)
754 rxKick();
755 }
756 }
757
758 void
759 Device::command(uint32_t command)
760 {
761 if (command & Regs::Command_Intr)
762 devIntrPost(Regs::Intr_Soft);
763
764 if (command & Regs::Command_Reset)
765 reset();
766 }
767
768 void
769 Device::reset()
770 {
771 using namespace Regs;
772
773 memset(&regs, 0, sizeof(regs));
774
775 regs.Config = 0;
776 if (params()->rx_thread)
777 regs.Config |= Config_RxThread;
778 if (params()->tx_thread)
779 regs.Config |= Config_TxThread;
780 regs.IntrMask = Intr_Soft | Intr_RxHigh | Intr_RxPacket | Intr_TxLow;
781 regs.RxMaxCopy = params()->rx_max_copy;
782 regs.TxMaxCopy = params()->tx_max_copy;
783 regs.RxMaxIntr = params()->rx_max_intr;
784 regs.RxFifoSize = params()->rx_fifo_size;
785 regs.TxFifoSize = params()->tx_fifo_size;
786 regs.RxFifoMark = params()->rx_fifo_threshold;
787 regs.TxFifoMark = params()->tx_fifo_threshold;
788 regs.HwAddr = params()->eaddr;
789
790 rxList.clear();
791 txList.clear();
792
793 rxState = rxIdle;
794 txState = txIdle;
795
796 rxFifo.clear();
797 rxFifoPtr = rxFifo.end();
798 txFifo.clear();
799 rxEmpty = false;
800 txFull = false;
801
802 int size = virtualRegs.size();
803 virtualRegs.clear();
804 virtualRegs.resize(size);
805 for (int i = 0; i < size; ++i)
806 virtualRegs[i].rxPacket = rxFifo.end();
807 }
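/*
 * reset() returns the device to its power-on state: the register file
 * is cleared and reloaded from the configuration parameters, both
 * fifos and the per-VNIC work lists are emptied, and every virtual
 * register's rxPacket iterator is reset to rxFifo.end().
 */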
808
809 void
810 Device::rxDmaCopy()
811 {
812 assert(rxState == rxCopy);
813 rxState = rxCopyDone;
814 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
815 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
816 rxDmaAddr, rxDmaLen);
817 DDUMP(EthernetData, rxDmaData, rxDmaLen);
818 }
819
820 void
821 Device::rxDmaDone()
822 {
823 rxDmaCopy();
824
825 // If the transmit state machine has a pending DMA, let it go first
826 if (txState == txBeginCopy)
827 txKick();
828
829 rxKick();
830 }
831
832 void
833 Device::rxKick()
834 {
835 VirtualReg *vnic;
836
837 DPRINTF(EthernetSM, "receive kick rxState=%s (rxFifo.size=%d)\n",
838 RxStateStrings[rxState], rxFifo.size());
839
840 if (rxKickTick > curTick) {
841 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
842 rxKickTick);
843 return;
844 }
845
846 next:
847 if (rxState == rxIdle)
848 goto exit;
849
850 assert(!rxList.empty());
851 vnic = &virtualRegs[rxList.front()];
852
853 DPRINTF(EthernetSM, "processing rxState=%s for virtual nic %d\n",
854 RxStateStrings[rxState], rxList.front());
855
856 switch (rxState) {
857 case rxFifoBlock:
858 if (vnic->rxPacket != rxFifo.end()) {
859 rxState = rxBeginCopy;
860 break;
861 }
862
863 if (rxFifoPtr == rxFifo.end()) {
864 DPRINTF(EthernetSM, "receive waiting for data. Nothing to do.\n");
865 goto exit;
866 }
867
868 assert(!rxFifo.empty());
869
870 // Grab a new packet from the fifo.
871 vnic->rxPacket = rxFifoPtr++;
872 vnic->rxPacketOffset = 0;
873 vnic->rxPacketBytes = (*vnic->rxPacket)->length;
874 assert(vnic->rxPacketBytes);
875
876 vnic->rxDoneData = 0;
877 /* scope for variables */ {
878 IpPtr ip(*vnic->rxPacket);
879 if (ip) {
880 vnic->rxDoneData |= Regs::RxDone_IpPacket;
881 rxIpChecksums++;
882 if (cksum(ip) != 0) {
883 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
884 vnic->rxDoneData |= Regs::RxDone_IpError;
885 }
886 TcpPtr tcp(ip);
887 UdpPtr udp(ip);
888 if (tcp) {
889 vnic->rxDoneData |= Regs::RxDone_TcpPacket;
890 rxTcpChecksums++;
891 if (cksum(tcp) != 0) {
892 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
893 vnic->rxDoneData |= Regs::RxDone_TcpError;
894 }
895 } else if (udp) {
896 vnic->rxDoneData |= Regs::RxDone_UdpPacket;
897 rxUdpChecksums++;
898 if (cksum(udp) != 0) {
899 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
900 vnic->rxDoneData |= Regs::RxDone_UdpError;
901 }
902 }
903 }
904 }
905 rxState = rxBeginCopy;
906 break;
907
908 case rxBeginCopy:
909 if (dmaInterface && dmaInterface->busy())
910 goto exit;
911
912 rxDmaAddr = plat->pciToDma(Regs::get_RxData_Addr(vnic->RxData));
913 rxDmaLen = min<int>(Regs::get_RxData_Len(vnic->RxData),
914 vnic->rxPacketBytes);
915 rxDmaData = (*vnic->rxPacket)->data + vnic->rxPacketOffset;
916 rxState = rxCopy;
917
918 if (dmaInterface) {
919 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen,
920 curTick, &rxDmaEvent, true);
921 goto exit;
922 }
923
924 if (dmaWriteDelay != 0 || dmaWriteFactor != 0) {
925 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
926 Tick start = curTick + dmaWriteDelay + factor;
927 rxDmaEvent.schedule(start);
928 goto exit;
929 }
930
931 rxDmaCopy();
932 break;
933
934 case rxCopy:
935 DPRINTF(EthernetSM, "receive machine still copying\n");
936 goto exit;
937
938 case rxCopyDone:
939 vnic->RxDone = vnic->rxDoneData | rxDmaLen;
940 vnic->RxDone |= Regs::RxDone_Complete;
941
942 if (vnic->rxPacketBytes == rxDmaLen) {
943 DPRINTF(EthernetSM, "rxKick: packet complete on vnic %d\n",
944 rxList.front());
945 rxFifo.remove(vnic->rxPacket);
946 vnic->rxPacket = rxFifo.end();
947 } else {
948 vnic->RxDone |= Regs::RxDone_More;
949 vnic->rxPacketBytes -= rxDmaLen;
950 vnic->rxPacketOffset += rxDmaLen;
951 DPRINTF(EthernetSM,
952 "rxKick: packet not complete on vnic %d: %d bytes left\n",
953 rxList.front(), vnic->rxPacketBytes);
954 }
955
956 rxList.pop_front();
957 rxState = rxList.empty() ? rxIdle : rxFifoBlock;
958
959 if (rxFifo.empty()) {
960 devIntrPost(Regs::Intr_RxEmpty);
961 rxEmpty = true;
962 }
963
964 devIntrPost(Regs::Intr_RxDMA);
965 break;
966
967 default:
968 panic("Invalid rxState!");
969 }
970
971 DPRINTF(EthernetSM, "entering next rxState=%s\n",
972 RxStateStrings[rxState]);
973
974 goto next;
975
976 exit:
977 /**
978 * @todo do we want to schedule a future kick?
979 */
980 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
981 RxStateStrings[rxState]);
982 }
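/*
 * Receive state machine summary:
 *   rxIdle -> rxFifoBlock      guest wrote RxData while rxEnable is set
 *   rxFifoBlock -> rxBeginCopy a packet is available in the fifo; the
 *                              IP/TCP/UDP checksum flags are computed here
 *   rxBeginCopy -> rxCopy      a DMA write of up to RxData.Len bytes is issued
 *   rxCopy -> rxCopyDone       rxDmaCopy() performed the copy
 *   rxCopyDone -> rxFifoBlock or rxIdle, posting Intr_RxDMA and, if the
 *                              fifo drained, Intr_RxEmpty
 */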
983
984 void
985 Device::txDmaCopy()
986 {
987 assert(txState == txCopy);
988 txState = txCopyDone;
989 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
990 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
991 txDmaAddr, txDmaLen);
992 DDUMP(EthernetData, txDmaData, txDmaLen);
993 }
994
995 void
996 Device::txDmaDone()
997 {
998 txDmaCopy();
999
1000 // If the receive state machine has a pending DMA, let it go first
1001 if (rxState == rxBeginCopy)
1002 rxKick();
1003
1004 txKick();
1005 }
1006
1007 void
1008 Device::transmit()
1009 {
1010 if (txFifo.empty()) {
1011 DPRINTF(Ethernet, "nothing to transmit\n");
1012 return;
1013 }
1014
1015 uint32_t interrupts;
1016 PacketPtr packet = txFifo.front();
1017 if (!interface->sendPacket(packet)) {
1018 DPRINTF(Ethernet, "Packet Transmit: failed txFifo available %d\n",
1019 txFifo.avail());
1020 goto reschedule;
1021 }
1022
1023 txFifo.pop();
1024 #if TRACING_ON
1025 if (DTRACE(Ethernet)) {
1026 IpPtr ip(packet);
1027 if (ip) {
1028 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1029 TcpPtr tcp(ip);
1030 if (tcp) {
1031 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
1032 tcp->sport(), tcp->dport());
1033 }
1034 }
1035 }
1036 #endif
1037
1038 DDUMP(EthernetData, packet->data, packet->length);
1039 txBytes += packet->length;
1040 txPackets++;
1041
1042 DPRINTF(Ethernet, "Packet Transmit: successful txFifo Available %d\n",
1043 txFifo.avail());
1044
1045 interrupts = Regs::Intr_TxPacket;
1046 if (txFifo.size() < regs.TxFifoMark)
1047 interrupts |= Regs::Intr_TxLow;
1048 devIntrPost(interrupts);
1049
1050 reschedule:
1051 if (!txFifo.empty() && !txEvent.scheduled()) {
1052 DPRINTF(Ethernet, "reschedule transmit\n");
1053 txEvent.schedule(curTick + retryTime);
1054 }
1055 }
1056
1057 void
1058 Device::txKick()
1059 {
1060 VirtualReg *vnic;
1061 DPRINTF(EthernetSM, "transmit kick txState=%s (txFifo.size=%d)\n",
1062 TxStateStrings[txState], txFifo.size());
1063
1064 if (txKickTick > curTick) {
1065 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1066 txKickTick);
1067 return;
1068 }
1069
1070 next:
1071 if (txState == txIdle)
1072 goto exit;
1073
1074 assert(!txList.empty());
1075 vnic = &virtualRegs[txList.front()];
1076
1077 switch (txState) {
1078 case txFifoBlock:
1079 assert(Regs::get_TxDone_Busy(vnic->TxDone));
1080 if (!txPacket) {
1081 // Grab a new packet from the fifo.
1082 txPacket = new PacketData(16384);
1083 txPacketOffset = 0;
1084 }
1085
1086 if (txFifo.avail() - txPacket->length <
1087 Regs::get_TxData_Len(vnic->TxData)) {
1088 DPRINTF(EthernetSM, "transmit fifo full. Nothing to do.\n");
1089 goto exit;
1090 }
1091
1092 txState = txBeginCopy;
1093 break;
1094
1095 case txBeginCopy:
1096 if (dmaInterface && dmaInterface->busy())
1097 goto exit;
1098
1099 txDmaAddr = plat->pciToDma(Regs::get_TxData_Addr(vnic->TxData));
1100 txDmaLen = Regs::get_TxData_Len(vnic->TxData);
1101 txDmaData = txPacket->data + txPacketOffset;
1102 txState = txCopy;
1103
1104 if (dmaInterface) {
1105 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen,
1106 curTick, &txDmaEvent, true);
1107 goto exit;
1108 }
1109
1110 if (dmaReadDelay != 0 || dmaReadFactor != 0) {
1111 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1112 Tick start = curTick + dmaReadDelay + factor;
1113 txDmaEvent.schedule(start);
1114 goto exit;
1115 }
1116
1117 txDmaCopy();
1118 break;
1119
1120 case txCopy:
1121 DPRINTF(EthernetSM, "transmit machine still copying\n");
1122 goto exit;
1123
1124 case txCopyDone:
1125 vnic->TxDone = txDmaLen | Regs::TxDone_Complete;
1126 txPacket->length += txDmaLen;
1127 if ((vnic->TxData & Regs::TxData_More)) {
1128 txPacketOffset += txDmaLen;
1129 txState = txIdle;
1130 devIntrPost(Regs::Intr_TxDMA);
1131 break;
1132 }
1133
1134 assert(txPacket->length <= txFifo.avail());
1135 if ((vnic->TxData & Regs::TxData_Checksum)) {
1136 IpPtr ip(txPacket);
1137 if (ip) {
1138 TcpPtr tcp(ip);
1139 if (tcp) {
1140 tcp->sum(0);
1141 tcp->sum(cksum(tcp));
1142 txTcpChecksums++;
1143 }
1144
1145 UdpPtr udp(ip);
1146 if (udp) {
1147 udp->sum(0);
1148 udp->sum(cksum(udp));
1149 txUdpChecksums++;
1150 }
1151
1152 ip->sum(0);
1153 ip->sum(cksum(ip));
1154 txIpChecksums++;
1155 }
1156 }
1157
1158 txFifo.push(txPacket);
1159 if (txFifo.avail() < regs.TxMaxCopy) {
1160 devIntrPost(Regs::Intr_TxFull);
1161 txFull = true;
1162 }
1163 txPacket = 0;
1164 transmit();
1165 txList.pop_front();
1166 txState = txList.empty() ? txIdle : txFifoBlock;
1167 devIntrPost(Regs::Intr_TxDMA);
1168 break;
1169
1170 default:
1171 panic("Invalid txState!");
1172 }
1173
1174 DPRINTF(EthernetSM, "entering next txState=%s\n",
1175 TxStateStrings[txState]);
1176
1177 goto next;
1178
1179 exit:
1180 /**
1181 * @todo do we want to schedule a future kick?
1182 */
1183 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
1184 TxStateStrings[txState]);
1185 }
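/*
 * Transmit state machine summary:
 *   txIdle -> txFifoBlock      guest wrote TxData while txEnable is set
 *   txFifoBlock -> txBeginCopy txFifo has room for the requested segment
 *   txBeginCopy -> txCopy      a DMA read of TxData.Len bytes is issued
 *   txCopy -> txCopyDone       txDmaCopy() performed the copy
 *   txCopyDone                 if TxData_More is set the packet stays
 *                              buffered awaiting the next segment;
 *                              otherwise optional checksums are inserted,
 *                              the packet is pushed into txFifo and
 *                              transmit() is attempted.  Intr_TxDMA is
 *                              posted either way.
 */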
1186
1187 void
1188 Device::transferDone()
1189 {
1190 if (txFifo.empty()) {
1191 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
1192 return;
1193 }
1194
1195 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
1196
1197 if (txEvent.scheduled())
1198 txEvent.reschedule(curTick + cycles(1));
1199 else
1200 txEvent.schedule(curTick + cycles(1));
1201 }
1202
1203 bool
1204 Device::rxFilter(const PacketPtr &packet)
1205 {
1206 if (!Regs::get_Config_Filter(regs.Config))
1207 return false;
1208
1209 panic("receive filter not implemented\n");
1210 bool drop = true;
1211
1212 #if 0
1213 string type;
1214
1215 EthHdr *eth = packet->eth();
1216 if (eth->unicast()) {
1217 // If we're accepting all unicast addresses
1218 if (acceptUnicast)
1219 drop = false;
1220
1221 // If we make a perfect match
1222 if (acceptPerfect && params->eaddr == eth->dst())
1223 drop = false;
1224
1225 if (acceptArp && eth->type() == ETH_TYPE_ARP)
1226 drop = false;
1227
1228 } else if (eth->broadcast()) {
1229 // if we're accepting broadcasts
1230 if (acceptBroadcast)
1231 drop = false;
1232
1233 } else if (eth->multicast()) {
1234 // if we're accepting all multicasts
1235 if (acceptMulticast)
1236 drop = false;
1237
1238 }
1239
1240 if (drop) {
1241 DPRINTF(Ethernet, "rxFilter drop\n");
1242 DDUMP(EthernetData, packet->data, packet->length);
1243 }
1244 #endif
1245 return drop;
1246 }
1247
1248 bool
1249 Device::recvPacket(PacketPtr packet)
1250 {
1251 rxBytes += packet->length;
1252 rxPackets++;
1253
1254 DPRINTF(Ethernet, "Receiving packet from wire, rxFifo Available is %d\n",
1255 rxFifo.avail());
1256
1257 if (!rxEnable) {
1258 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
1259 return true;
1260 }
1261
1262 if (rxFilter(packet)) {
1263 DPRINTF(Ethernet, "packet filtered...dropped\n");
1264 return true;
1265 }
1266
1267 if (rxFifo.size() >= regs.RxFifoMark)
1268 devIntrPost(Regs::Intr_RxHigh);
1269
1270 if (!rxFifo.push(packet)) {
1271 DPRINTF(Ethernet,
1272 "packet will not fit in receive buffer...packet dropped\n");
1273 return false;
1274 }
1275
1276 // If we were at the last element, back up one to go to the new
1277 // last element of the list.
1278 if (rxFifoPtr == rxFifo.end())
1279 --rxFifoPtr;
1280
1281 devIntrPost(Regs::Intr_RxPacket);
1282 rxKick();
1283 return true;
1284 }
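/*
 * Return value note: returning true tells the link the packet was
 * consumed, even when it was deliberately dropped (receive disabled or
 * filtered); returning false signals that the fifo is full, which
 * presumably makes the sender hold the packet and retry later.
 */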
1285
1286 //=====================================================================
1287 //
1288 //
1289 void
1290 Base::serialize(ostream &os)
1291 {
1292 // Serialize the PciDev base class
1293 PciDev::serialize(os);
1294
1295 SERIALIZE_SCALAR(rxEnable);
1296 SERIALIZE_SCALAR(txEnable);
1297 SERIALIZE_SCALAR(cpuIntrEnable);
1298
1299 /*
1300 * Keep track of pending interrupt status.
1301 */
1302 SERIALIZE_SCALAR(intrTick);
1303 SERIALIZE_SCALAR(cpuPendingIntr);
1304 Tick intrEventTick = 0;
1305 if (intrEvent)
1306 intrEventTick = intrEvent->when();
1307 SERIALIZE_SCALAR(intrEventTick);
1308 }
1309
1310 void
1311 Base::unserialize(Checkpoint *cp, const std::string &section)
1312 {
1313 // Unserialize the PciDev base class
1314 PciDev::unserialize(cp, section);
1315
1316 UNSERIALIZE_SCALAR(rxEnable);
1317 UNSERIALIZE_SCALAR(txEnable);
1318 UNSERIALIZE_SCALAR(cpuIntrEnable);
1319
1320 /*
1321 * Keep track of pending interrupt status.
1322 */
1323 UNSERIALIZE_SCALAR(intrTick);
1324 UNSERIALIZE_SCALAR(cpuPendingIntr);
1325 Tick intrEventTick;
1326 UNSERIALIZE_SCALAR(intrEventTick);
1327 if (intrEventTick) {
1328 intrEvent = new IntrEvent(this, true);
1329 intrEvent->schedule(intrEventTick);
1330 }
1331 }
1332
1333 void
1334 Device::serialize(ostream &os)
1335 {
1336 // Serialize the PciDev base class
1337 Base::serialize(os);
1338
1339 if (rxState == rxCopy)
1340 panic("can't serialize with an in flight dma request rxState=%s",
1341 RxStateStrings[rxState]);
1342
1343 if (txState == txCopy)
1344 panic("can't serialize with an in flight dma request txState=%s",
1345 TxStateStrings[txState]);
1346
1347 /*
1348 * Serialize the device registers
1349 */
1350 SERIALIZE_SCALAR(regs.Config);
1351 SERIALIZE_SCALAR(regs.IntrStatus);
1352 SERIALIZE_SCALAR(regs.IntrMask);
1353 SERIALIZE_SCALAR(regs.RxMaxCopy);
1354 SERIALIZE_SCALAR(regs.TxMaxCopy);
1355 SERIALIZE_SCALAR(regs.RxMaxIntr);
1356 SERIALIZE_SCALAR(regs.RxData);
1357 SERIALIZE_SCALAR(regs.RxDone);
1358 SERIALIZE_SCALAR(regs.TxData);
1359 SERIALIZE_SCALAR(regs.TxDone);
1360
1361 /*
1362 * Serialize the virtual nic state
1363 */
1364 int virtualRegsSize = virtualRegs.size();
1365 SERIALIZE_SCALAR(virtualRegsSize);
1366 for (int i = 0; i < virtualRegsSize; ++i) {
1367 VirtualReg *vnic = &virtualRegs[i];
1368
1369 string reg = csprintf("vnic%d", i);
1370 paramOut(os, reg + ".RxData", vnic->RxData);
1371 paramOut(os, reg + ".RxDone", vnic->RxDone);
1372 paramOut(os, reg + ".TxData", vnic->TxData);
1373 paramOut(os, reg + ".TxDone", vnic->TxDone);
1374
1375 PacketFifo::iterator rxFifoPtr;
1376
1377 bool rxPacketExists = vnic->rxPacket != rxFifo.end();
1378 paramOut(os, reg + ".rxPacketExists", rxPacketExists);
1379 if (rxPacketExists) {
1380 int rxPacket = 0;
1381 PacketFifo::iterator i = rxFifo.begin();
1382 while (i != vnic->rxPacket) {
1383 assert(i != rxFifo.end());
1384 ++i;
1385 ++rxPacket;
1386 }
1387
1388 paramOut(os, reg + ".rxPacket", rxPacket);
1389 paramOut(os, reg + ".rxPacketOffset", vnic->rxPacketOffset);
1390 paramOut(os, reg + ".rxPacketBytes", vnic->rxPacketBytes);
1391 }
1392 paramOut(os, reg + ".rxDoneData", vnic->rxDoneData);
1393 }
1394
1395 VirtualList::iterator i, end;
1396 int count;
1397
1398 int rxListSize = rxList.size();
1399 SERIALIZE_SCALAR(rxListSize);
1400 for (count = 0, i = rxList.begin(), end = rxList.end(); i != end; ++i)
1401 paramOut(os, csprintf("rxList%d", count++), *i);
1402
1403 int txListSize = txList.size();
1404 SERIALIZE_SCALAR(txListSize);
1405 for (count = 0, i = txList.begin(), end = txList.end(); i != end; ++i)
1406 paramOut(os, csprintf("txList%d", count++), *i);
1407
1408 /*
1409 * Serialize rx state machine
1410 */
1411 int rxState = this->rxState;
1412 SERIALIZE_SCALAR(rxState);
1413 SERIALIZE_SCALAR(rxEmpty);
1414 rxFifo.serialize("rxFifo", os);
1415
1416 /*
1417 * Serialize tx state machine
1418 */
1419 int txState = this->txState;
1420 SERIALIZE_SCALAR(txState);
1421 SERIALIZE_SCALAR(txFull);
1422 txFifo.serialize("txFifo", os);
1423 bool txPacketExists = txPacket;
1424 SERIALIZE_SCALAR(txPacketExists);
1425 if (txPacketExists) {
1426 txPacket->serialize("txPacket", os);
1427 SERIALIZE_SCALAR(txPacketOffset);
1428 SERIALIZE_SCALAR(txPacketBytes);
1429 }
1430
1431 /*
1432 * If there's a pending transmit, store the time so we can
1433 * reschedule it later
1434 */
1435 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
1436 SERIALIZE_SCALAR(transmitTick);
1437 }
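/*
 * Serialization notes: fifo iterators (vnic->rxPacket) cannot be stored
 * directly, so each is converted to an index into rxFifo and rebuilt in
 * unserialize() after the fifo itself has been restored.  Checkpointing
 * is refused while a DMA copy is in flight (rxCopy/txCopy above).
 */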
1438
1439 void
1440 Device::unserialize(Checkpoint *cp, const std::string &section)
1441 {
1442 // Unserialize the PciDev base class
1443 Base::unserialize(cp, section);
1444
1445 /*
1446 * Unserialize the device registers
1447 */
1448 UNSERIALIZE_SCALAR(regs.Config);
1449 UNSERIALIZE_SCALAR(regs.IntrStatus);
1450 UNSERIALIZE_SCALAR(regs.IntrMask);
1451 UNSERIALIZE_SCALAR(regs.RxMaxCopy);
1452 UNSERIALIZE_SCALAR(regs.TxMaxCopy);
1453 UNSERIALIZE_SCALAR(regs.RxMaxIntr);
1454 UNSERIALIZE_SCALAR(regs.RxData);
1455 UNSERIALIZE_SCALAR(regs.RxDone);
1456 UNSERIALIZE_SCALAR(regs.TxData);
1457 UNSERIALIZE_SCALAR(regs.TxDone);
1458
1459 int rxListSize;
1460 UNSERIALIZE_SCALAR(rxListSize);
1461 rxList.clear();
1462 for (int i = 0; i < rxListSize; ++i) {
1463 int value;
1464 paramIn(cp, section, csprintf("rxList%d", i), value);
1465 rxList.push_back(value);
1466 }
1467
1468 int txListSize;
1469 UNSERIALIZE_SCALAR(txListSize);
1470 txList.clear();
1471 for (int i = 0; i < txListSize; ++i) {
1472 int value;
1473 paramIn(cp, section, csprintf("txList%d", i), value);
1474 txList.push_back(value);
1475 }
1476
1477 /*
1478 * Unserialize rx state machine
1479 */
1480 int rxState;
1481 UNSERIALIZE_SCALAR(rxState);
1482 UNSERIALIZE_SCALAR(rxEmpty);
1483 this->rxState = (RxState) rxState;
1484 rxFifo.unserialize("rxFifo", cp, section);
1485
1486 /*
1487 * Unserialize tx state machine
1488 */
1489 int txState;
1490 UNSERIALIZE_SCALAR(txState);
1491 UNSERIALIZE_SCALAR(txFull);
1492 this->txState = (TxState) txState;
1493 txFifo.unserialize("txFifo", cp, section);
1494 bool txPacketExists;
1495 UNSERIALIZE_SCALAR(txPacketExists);
1496 txPacket = 0;
1497 if (txPacketExists) {
1498 txPacket = new PacketData(16384);
1499 txPacket->unserialize("txPacket", cp, section);
1500 UNSERIALIZE_SCALAR(txPacketOffset);
1501 UNSERIALIZE_SCALAR(txPacketBytes);
1502 }
1503
1504 /*
1505 * unserialize the virtual nic registers/state
1506 *
1507 * this must be done after the unserialization of the rxFifo
1508 * because the packet iterators depend on the fifo being populated
1509 */
1510 int virtualRegsSize;
1511 UNSERIALIZE_SCALAR(virtualRegsSize);
1512 virtualRegs.clear();
1513 virtualRegs.resize(virtualRegsSize);
1514 for (int i = 0; i < virtualRegsSize; ++i) {
1515 VirtualReg *vnic = &virtualRegs[i];
1516 string reg = csprintf("vnic%d", i);
1517
1518 paramIn(cp, section, reg + ".RxData", vnic->RxData);
1519 paramIn(cp, section, reg + ".RxDone", vnic->RxDone);
1520 paramIn(cp, section, reg + ".TxData", vnic->TxData);
1521 paramIn(cp, section, reg + ".TxDone", vnic->TxDone);
1522
1523 bool rxPacketExists;
1524 paramIn(cp, section, reg + ".rxPacketExists", rxPacketExists);
1525 if (rxPacketExists) {
1526 int rxPacket;
1527 paramIn(cp, section, reg + ".rxPacket", rxPacket);
1528 vnic->rxPacket = rxFifo.begin();
1529 while (rxPacket--)
1530 ++vnic->rxPacket;
1531
1532 paramIn(cp, section, reg + ".rxPacketOffset",
1533 vnic->rxPacketOffset);
1534 paramIn(cp, section, reg + ".rxPacketBytes", vnic->rxPacketBytes);
1535 } else {
1536 vnic->rxPacket = rxFifo.end();
1537 }
1538 paramIn(cp, section, reg + ".rxDoneData", vnic->rxDoneData);
1539 }
1540
1541 /*
1542 * If there's a pending transmit, reschedule it now
1543 */
1544 Tick transmitTick;
1545 UNSERIALIZE_SCALAR(transmitTick);
1546 if (transmitTick)
1547 txEvent.schedule(curTick + transmitTick);
1548
1549 /*
1550 * re-add addrRanges to bus bridges
1551 */
1552 if (pioInterface) {
1553 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
1554 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
1555 }
1556 }
1557
1558 Tick
1559 Device::cacheAccess(MemReqPtr &req)
1560 {
1561 Addr daddr;
1562 int bar;
1563 if (!getBAR(req->paddr, daddr, bar))
1564 panic("address does not map to a BAR pa=%#x va=%#x size=%d",
1565 req->paddr, req->vaddr, req->size);
1566
1567 DPRINTF(EthernetPIO, "timing %s to paddr=%#x bar=%d daddr=%#x\n",
1568 req->cmd.toString(), req->paddr, bar, daddr);
1569
1570 return curTick + pioLatency;
1571 }
1572
1573 BEGIN_DECLARE_SIM_OBJECT_PARAMS(Interface)
1574
1575 SimObjectParam<EtherInt *> peer;
1576 SimObjectParam<Device *> device;
1577
1578 END_DECLARE_SIM_OBJECT_PARAMS(Interface)
1579
1580 BEGIN_INIT_SIM_OBJECT_PARAMS(Interface)
1581
1582 INIT_PARAM_DFLT(peer, "peer interface", NULL),
1583 INIT_PARAM(device, "Ethernet device of this interface")
1584
1585 END_INIT_SIM_OBJECT_PARAMS(Interface)
1586
1587 CREATE_SIM_OBJECT(Interface)
1588 {
1589 Interface *dev_int = new Interface(getInstanceName(), device);
1590
1591 EtherInt *p = (EtherInt *)peer;
1592 if (p) {
1593 dev_int->setPeer(p);
1594 p->setPeer(dev_int);
1595 }
1596
1597 return dev_int;
1598 }
1599
1600 REGISTER_SIM_OBJECT("SinicInt", Interface)
1601
1602
1603 BEGIN_DECLARE_SIM_OBJECT_PARAMS(Device)
1604
1605 Param<Tick> clock;
1606
1607 Param<Addr> addr;
1608 SimObjectParam<MemoryController *> mmu;
1609 SimObjectParam<PhysicalMemory *> physmem;
1610 SimObjectParam<PciConfigAll *> configspace;
1611 SimObjectParam<PciConfigData *> configdata;
1612 SimObjectParam<Platform *> platform;
1613 Param<uint32_t> pci_bus;
1614 Param<uint32_t> pci_dev;
1615 Param<uint32_t> pci_func;
1616
1617 SimObjectParam<HierParams *> hier;
1618 SimObjectParam<Bus*> pio_bus;
1619 SimObjectParam<Bus*> dma_bus;
1620 SimObjectParam<Bus*> payload_bus;
1621 Param<Tick> dma_read_delay;
1622 Param<Tick> dma_read_factor;
1623 Param<Tick> dma_write_delay;
1624 Param<Tick> dma_write_factor;
1625 Param<bool> dma_no_allocate;
1626 Param<Tick> pio_latency;
1627 Param<Tick> intr_delay;
1628
1629 Param<Tick> rx_delay;
1630 Param<Tick> tx_delay;
1631 Param<uint32_t> rx_max_copy;
1632 Param<uint32_t> tx_max_copy;
1633 Param<uint32_t> rx_max_intr;
1634 Param<uint32_t> rx_fifo_size;
1635 Param<uint32_t> tx_fifo_size;
1636 Param<uint32_t> rx_fifo_threshold;
1637 Param<uint32_t> tx_fifo_threshold;
1638
1639 Param<bool> rx_filter;
1640 Param<string> hardware_address;
1641 Param<bool> rx_thread;
1642 Param<bool> tx_thread;
1643
1644 END_DECLARE_SIM_OBJECT_PARAMS(Device)
1645
1646 BEGIN_INIT_SIM_OBJECT_PARAMS(Device)
1647
1648 INIT_PARAM(clock, "State machine cycle time"),
1649
1650 INIT_PARAM(addr, "Device Address"),
1651 INIT_PARAM(mmu, "Memory Controller"),
1652 INIT_PARAM(physmem, "Physical Memory"),
1653 INIT_PARAM(configspace, "PCI Configspace"),
1654 INIT_PARAM(configdata, "PCI Config data"),
1655 INIT_PARAM(platform, "Platform"),
1656 INIT_PARAM(pci_bus, "PCI bus"),
1657 INIT_PARAM(pci_dev, "PCI device number"),
1658 INIT_PARAM(pci_func, "PCI function code"),
1659
1660 INIT_PARAM(hier, "Hierarchy global variables"),
1661 INIT_PARAM(pio_bus, "The IO Bus to attach to for device PIO"),
1662 INIT_PARAM(dma_bus, "The IO Bus to attach to for DMA headers"),
1663 INIT_PARAM(payload_bus, "The IO Bus to attach to for payload"),
1664 INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
1665 INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
1666 INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
1667 INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
1668 INIT_PARAM(dma_no_allocate, "Should we allocate on read in cache"),
1669 INIT_PARAM(pio_latency, "Programmed IO latency in bus cycles"),
1670 INIT_PARAM(intr_delay, "Interrupt Delay"),
1671
1672 INIT_PARAM(rx_delay, "Receive Delay"),
1673 INIT_PARAM(tx_delay, "Transmit Delay"),
1674 INIT_PARAM(rx_max_copy, "rx max copy"),
1675 INIT_PARAM(tx_max_copy, "tx max copy"),
1676 INIT_PARAM(rx_max_intr, "rx max intr"),
1677 INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
1678 INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),
1679 INIT_PARAM(rx_fifo_threshold, "rxFifo high-watermark threshold in bytes"),
1680 INIT_PARAM(tx_fifo_threshold, "txFifo low-watermark threshold in bytes"),
1681
1682 INIT_PARAM(rx_filter, "Enable Receive Filter"),
1683 INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
1684 INIT_PARAM(rx_thread, "Set the Config_RxThread bit at reset"),
1685 INIT_PARAM(tx_thread, "Set the Config_TxThread bit at reset")
1686
1687 END_INIT_SIM_OBJECT_PARAMS(Device)
1688
1689
1690 CREATE_SIM_OBJECT(Device)
1691 {
1692 Device::Params *params = new Device::Params;
1693
1694 params->name = getInstanceName();
1695
1696 params->clock = clock;
1697
1698 params->mmu = mmu;
1699 params->physmem = physmem;
1700 params->configSpace = configspace;
1701 params->configData = configdata;
1702 params->plat = platform;
1703 params->busNum = pci_bus;
1704 params->deviceNum = pci_dev;
1705 params->functionNum = pci_func;
1706
1707 params->hier = hier;
1708 params->pio_bus = pio_bus;
1709 params->header_bus = dma_bus;
1710 params->payload_bus = payload_bus;
1711 params->dma_read_delay = dma_read_delay;
1712 params->dma_read_factor = dma_read_factor;
1713 params->dma_write_delay = dma_write_delay;
1714 params->dma_write_factor = dma_write_factor;
1715 params->dma_no_allocate = dma_no_allocate;
1716 params->pio_latency = pio_latency;
1717 params->intr_delay = intr_delay;
1718
1719 params->tx_delay = tx_delay;
1720 params->rx_delay = rx_delay;
1721 params->rx_max_copy = rx_max_copy;
1722 params->tx_max_copy = tx_max_copy;
1723 params->rx_max_intr = rx_max_intr;
1724 params->rx_fifo_size = rx_fifo_size;
1725 params->tx_fifo_size = tx_fifo_size;
1726 params->rx_fifo_threshold = rx_fifo_threshold;
1727 params->tx_fifo_threshold = tx_fifo_threshold;
1728
1729 params->rx_filter = rx_filter;
1730 params->eaddr = hardware_address;
1731 params->rx_thread = rx_thread;
1732 params->tx_thread = tx_thread;
1733
1734 return new Device(params);
1735 }
1736
1737 REGISTER_SIM_OBJECT("Sinic", Device)
1738
1739 /* namespace Sinic */ }