Merge zizzer:/bk/m5
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "arch/vtophys.hh"
53
54 const char *NsRxStateStrings[] =
55 {
56 "rxIdle",
57 "rxDescRefr",
58 "rxDescRead",
59 "rxFifoBlock",
60 "rxFragWrite",
61 "rxDescWrite",
62 "rxAdvance"
63 };
64
65 const char *NsTxStateStrings[] =
66 {
67 "txIdle",
68 "txDescRefr",
69 "txDescRead",
70 "txFifoBlock",
71 "txFragRead",
72 "txDescWrite",
73 "txAdvance"
74 };
75
76 const char *NsDmaState[] =
77 {
78 "dmaIdle",
79 "dmaReading",
80 "dmaWriting",
81 "dmaReadWaiting",
82 "dmaWriteWaiting"
83 };
84
85 using namespace std;
86 using namespace Net;
87 using namespace TheISA;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
93 NSGigE::NSGigE(Params *p)
94 : PciDev(p), ioEnable(false),
95 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
96 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
97 txXferLen(0), rxXferLen(0), clock(p->clock),
98 txState(txIdle), txEnable(false), CTDD(false),
99 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
100 rxEnable(false), CRDD(false), rxPktBytes(0),
101 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
102 eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
103 txDmaReadEvent(this), txDmaWriteEvent(this),
104 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
105 txDelay(p->tx_delay), rxDelay(p->rx_delay),
106 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
107 txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
108 acceptMulticast(false), acceptUnicast(false),
109 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
110 physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
111 intrEvent(0), interface(0)
112 {
113 if (p->pio_bus) {
114 pioInterface = newPioInterface(name() + ".pio", p->hier,
115 p->pio_bus, this,
116 &NSGigE::cacheAccess);
117 pioLatency = p->pio_latency * p->pio_bus->clockRate;
118 }
119
120 if (p->header_bus) {
121 if (p->payload_bus)
122 dmaInterface = new DMAInterface<Bus>(name() + ".dma",
123 p->header_bus,
124 p->payload_bus, 1,
125 p->dma_no_allocate);
126 else
127 dmaInterface = new DMAInterface<Bus>(name() + ".dma",
128 p->header_bus,
129 p->header_bus, 1,
130 p->dma_no_allocate);
131 } else if (p->payload_bus)
132 panic("Must define a header bus if defining a payload bus");
133
134 intrDelay = p->intr_delay;
135 dmaReadDelay = p->dma_read_delay;
136 dmaWriteDelay = p->dma_write_delay;
137 dmaReadFactor = p->dma_read_factor;
138 dmaWriteFactor = p->dma_write_factor;
139
140 regsReset();
141 memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
142
143 memset(&rxDesc32, 0, sizeof(rxDesc32));
144 memset(&txDesc32, 0, sizeof(txDesc32));
145 memset(&rxDesc64, 0, sizeof(rxDesc64));
146 memset(&txDesc64, 0, sizeof(txDesc64));
147 }
148
149 NSGigE::~NSGigE()
150 {}
151
152 void
153 NSGigE::regStats()
154 {
155 txBytes
156 .name(name() + ".txBytes")
157 .desc("Bytes Transmitted")
158 .prereq(txBytes)
159 ;
160
161 rxBytes
162 .name(name() + ".rxBytes")
163 .desc("Bytes Received")
164 .prereq(rxBytes)
165 ;
166
167 txPackets
168 .name(name() + ".txPackets")
169 .desc("Number of Packets Transmitted")
170 .prereq(txBytes)
171 ;
172
173 rxPackets
174 .name(name() + ".rxPackets")
175 .desc("Number of Packets Received")
176 .prereq(rxBytes)
177 ;
178
179 txIpChecksums
180 .name(name() + ".txIpChecksums")
181 .desc("Number of tx IP Checksums done by device")
182 .precision(0)
183 .prereq(txBytes)
184 ;
185
186 rxIpChecksums
187 .name(name() + ".rxIpChecksums")
188 .desc("Number of rx IP Checksums done by device")
189 .precision(0)
190 .prereq(rxBytes)
191 ;
192
193 txTcpChecksums
194 .name(name() + ".txTcpChecksums")
195 .desc("Number of tx TCP Checksums done by device")
196 .precision(0)
197 .prereq(txBytes)
198 ;
199
200 rxTcpChecksums
201 .name(name() + ".rxTcpChecksums")
202 .desc("Number of rx TCP Checksums done by device")
203 .precision(0)
204 .prereq(rxBytes)
205 ;
206
207 txUdpChecksums
208 .name(name() + ".txUdpChecksums")
209 .desc("Number of tx UDP Checksums done by device")
210 .precision(0)
211 .prereq(txBytes)
212 ;
213
214 rxUdpChecksums
215 .name(name() + ".rxUdpChecksums")
216 .desc("Number of rx UDP Checksums done by device")
217 .precision(0)
218 .prereq(rxBytes)
219 ;
220
221 descDmaReads
222 .name(name() + ".descDMAReads")
223 .desc("Number of descriptors the device read w/ DMA")
224 .precision(0)
225 ;
226
227 descDmaWrites
228 .name(name() + ".descDMAWrites")
229 .desc("Number of descriptors the device wrote w/ DMA")
230 .precision(0)
231 ;
232
233 descDmaRdBytes
234 .name(name() + ".descDmaReadBytes")
235 .desc("number of descriptor bytes read w/ DMA")
236 .precision(0)
237 ;
238
239 descDmaWrBytes
240 .name(name() + ".descDmaWriteBytes")
241 .desc("number of descriptor bytes write w/ DMA")
242 .precision(0)
243 ;
244
245 txBandwidth
246 .name(name() + ".txBandwidth")
247 .desc("Transmit Bandwidth (bits/s)")
248 .precision(0)
249 .prereq(txBytes)
250 ;
251
252 rxBandwidth
253 .name(name() + ".rxBandwidth")
254 .desc("Receive Bandwidth (bits/s)")
255 .precision(0)
256 .prereq(rxBytes)
257 ;
258
259 totBandwidth
260 .name(name() + ".totBandwidth")
261 .desc("Total Bandwidth (bits/s)")
262 .precision(0)
263 .prereq(totBytes)
264 ;
265
266 totPackets
267 .name(name() + ".totPackets")
268 .desc("Total Packets")
269 .precision(0)
270 .prereq(totBytes)
271 ;
272
273 totBytes
274 .name(name() + ".totBytes")
275 .desc("Total Bytes")
276 .precision(0)
277 .prereq(totBytes)
278 ;
279
280 totPacketRate
281 .name(name() + ".totPPS")
282 .desc("Total Tranmission Rate (packets/s)")
283 .precision(0)
284 .prereq(totBytes)
285 ;
286
287 txPacketRate
288 .name(name() + ".txPPS")
289 .desc("Packet Tranmission Rate (packets/s)")
290 .precision(0)
291 .prereq(txBytes)
292 ;
293
294 rxPacketRate
295 .name(name() + ".rxPPS")
296 .desc("Packet Reception Rate (packets/s)")
297 .precision(0)
298 .prereq(rxBytes)
299 ;
300
301 postedSwi
302 .name(name() + ".postedSwi")
303 .desc("number of software interrupts posted to CPU")
304 .precision(0)
305 ;
306
307 totalSwi
308 .name(name() + ".totalSwi")
309 .desc("total number of Swi written to ISR")
310 .precision(0)
311 ;
312
313 coalescedSwi
314 .name(name() + ".coalescedSwi")
315 .desc("average number of Swi's coalesced into each post")
316 .precision(0)
317 ;
318
319 postedRxIdle
320 .name(name() + ".postedRxIdle")
321 .desc("number of rxIdle interrupts posted to CPU")
322 .precision(0)
323 ;
324
325 totalRxIdle
326 .name(name() + ".totalRxIdle")
327 .desc("total number of RxIdle written to ISR")
328 .precision(0)
329 ;
330
331 coalescedRxIdle
332 .name(name() + ".coalescedRxIdle")
333 .desc("average number of RxIdle's coalesced into each post")
334 .precision(0)
335 ;
336
337 postedRxOk
338 .name(name() + ".postedRxOk")
339 .desc("number of RxOk interrupts posted to CPU")
340 .precision(0)
341 ;
342
343 totalRxOk
344 .name(name() + ".totalRxOk")
345 .desc("total number of RxOk written to ISR")
346 .precision(0)
347 ;
348
349 coalescedRxOk
350 .name(name() + ".coalescedRxOk")
351 .desc("average number of RxOk's coalesced into each post")
352 .precision(0)
353 ;
354
355 postedRxDesc
356 .name(name() + ".postedRxDesc")
357 .desc("number of RxDesc interrupts posted to CPU")
358 .precision(0)
359 ;
360
361 totalRxDesc
362 .name(name() + ".totalRxDesc")
363 .desc("total number of RxDesc written to ISR")
364 .precision(0)
365 ;
366
367 coalescedRxDesc
368 .name(name() + ".coalescedRxDesc")
369 .desc("average number of RxDesc's coalesced into each post")
370 .precision(0)
371 ;
372
373 postedTxOk
374 .name(name() + ".postedTxOk")
375 .desc("number of TxOk interrupts posted to CPU")
376 .precision(0)
377 ;
378
379 totalTxOk
380 .name(name() + ".totalTxOk")
381 .desc("total number of TxOk written to ISR")
382 .precision(0)
383 ;
384
385 coalescedTxOk
386 .name(name() + ".coalescedTxOk")
387 .desc("average number of TxOk's coalesced into each post")
388 .precision(0)
389 ;
390
391 postedTxIdle
392 .name(name() + ".postedTxIdle")
393 .desc("number of TxIdle interrupts posted to CPU")
394 .precision(0)
395 ;
396
397 totalTxIdle
398 .name(name() + ".totalTxIdle")
399 .desc("total number of TxIdle written to ISR")
400 .precision(0)
401 ;
402
403 coalescedTxIdle
404 .name(name() + ".coalescedTxIdle")
405 .desc("average number of TxIdle's coalesced into each post")
406 .precision(0)
407 ;
408
409 postedTxDesc
410 .name(name() + ".postedTxDesc")
411 .desc("number of TxDesc interrupts posted to CPU")
412 .precision(0)
413 ;
414
415 totalTxDesc
416 .name(name() + ".totalTxDesc")
417 .desc("total number of TxDesc written to ISR")
418 .precision(0)
419 ;
420
421 coalescedTxDesc
422 .name(name() + ".coalescedTxDesc")
423 .desc("average number of TxDesc's coalesced into each post")
424 .precision(0)
425 ;
426
427 postedRxOrn
428 .name(name() + ".postedRxOrn")
429 .desc("number of RxOrn posted to CPU")
430 .precision(0)
431 ;
432
433 totalRxOrn
434 .name(name() + ".totalRxOrn")
435 .desc("total number of RxOrn written to ISR")
436 .precision(0)
437 ;
438
439 coalescedRxOrn
440 .name(name() + ".coalescedRxOrn")
441 .desc("average number of RxOrn's coalesced into each post")
442 .precision(0)
443 ;
444
445 coalescedTotal
446 .name(name() + ".coalescedTotal")
447 .desc("average number of interrupts coalesced into each post")
448 .precision(0)
449 ;
450
451 postedInterrupts
452 .name(name() + ".postedInterrupts")
453 .desc("number of posts to CPU")
454 .precision(0)
455 ;
456
457 droppedPackets
458 .name(name() + ".droppedPackets")
459 .desc("number of packets dropped")
460 .precision(0)
461 ;
462
463 coalescedSwi = totalSwi / postedInterrupts;
464 coalescedRxIdle = totalRxIdle / postedInterrupts;
465 coalescedRxOk = totalRxOk / postedInterrupts;
466 coalescedRxDesc = totalRxDesc / postedInterrupts;
467 coalescedTxOk = totalTxOk / postedInterrupts;
468 coalescedTxIdle = totalTxIdle / postedInterrupts;
469 coalescedTxDesc = totalTxDesc / postedInterrupts;
470 coalescedRxOrn = totalRxOrn / postedInterrupts;
471
472 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
473 totalTxOk + totalTxIdle + totalTxDesc +
474 totalRxOrn) / postedInterrupts;
475
476 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
477 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
478 totBandwidth = txBandwidth + rxBandwidth;
479 totBytes = txBytes + rxBytes;
480 totPackets = txPackets + rxPackets;
481
482 txPacketRate = txPackets / simSeconds;
483 rxPacketRate = rxPackets / simSeconds;
484 }
485
486 /**
487 * This is to read the PCI general configuration registers
488 */
489 void
490 NSGigE::readConfig(int offset, int size, uint8_t *data)
491 {
492 if (offset < PCI_DEVICE_SPECIFIC)
493 PciDev::readConfig(offset, size, data);
494 else
495 panic("Device specific PCI config space not implemented!\n");
496 }
497
498 /**
499 * This is to write to the PCI general configuration registers
500 */
501 void
502 NSGigE::writeConfig(int offset, int size, const uint8_t* data)
503 {
504 if (offset < PCI_DEVICE_SPECIFIC)
505 PciDev::writeConfig(offset, size, data);
506 else
507 panic("Device specific PCI config space not implemented!\n");
508
509 // Need to catch writes to BARs to update the PIO interface
510 switch (offset) {
511 // seems to work fine without all these PCI settings, but i
512 // put in the IO to double check, an assertion will fail if we
513 // need to properly implement it
514 case PCI_COMMAND:
515 if (config.data[offset] & PCI_CMD_IOSE)
516 ioEnable = true;
517 else
518 ioEnable = false;
519
520 #if 0
521 if (config.data[offset] & PCI_CMD_BME) {
522 bmEnabled = true;
523 }
524 else {
525 bmEnabled = false;
526 }
527
528 if (config.data[offset] & PCI_CMD_MSE) {
529 memEnable = true;
530 }
531 else {
532 memEnable = false;
533 }
534 #endif
535 break;
536
537 case PCI0_BASE_ADDR0:
538 if (BARAddrs[0] != 0) {
539 if (pioInterface)
540 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
541
542 BARAddrs[0] &= EV5::PAddrUncachedMask;
543 }
544 break;
545 case PCI0_BASE_ADDR1:
546 if (BARAddrs[1] != 0) {
547 if (pioInterface)
548 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
549
550 BARAddrs[1] &= EV5::PAddrUncachedMask;
551 }
552 break;
553 }
554 }
555
556 /**
557 * This reads the device registers, which are detailed in the NS83820
558 * spec sheet
559 */
560 Fault
561 NSGigE::read(MemReqPtr &req, uint8_t *data)
562 {
563 assert(ioEnable);
564
565 //The mask is to give you only the offset into the device register file
566 Addr daddr = req->paddr & 0xfff;
567 DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
568 daddr, req->paddr, req->vaddr, req->size);
569
570
571 // there are some reserved registers, you can see ns_gige_reg.h and
572 // the spec sheet for details
573 if (daddr > LAST && daddr <= RESERVED) {
574 panic("Accessing reserved register");
575 } else if (daddr > RESERVED && daddr <= 0x3FC) {
576 readConfig(daddr & 0xff, req->size, data);
577 return NoFault;
578 } else if (daddr >= MIB_START && daddr <= MIB_END) {
579 // don't implement all the MIB's. hopefully the kernel
580 // doesn't actually DEPEND upon their values
581 // MIB are just hardware stats keepers
582 uint32_t &reg = *(uint32_t *) data;
583 reg = 0;
584 return NoFault;
585 } else if (daddr > 0x3FC)
586 panic("Something is messed up!\n");
587
588 switch (req->size) {
589 case sizeof(uint32_t):
590 {
591 uint32_t &reg = *(uint32_t *)data;
592 uint16_t rfaddr;
593
594 switch (daddr) {
595 case CR:
596 reg = regs.command;
597 //these are supposed to be cleared on a read
598 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
599 break;
600
601 case CFGR:
602 reg = regs.config;
603 break;
604
605 case MEAR:
606 reg = regs.mear;
607 break;
608
609 case PTSCR:
610 reg = regs.ptscr;
611 break;
612
613 case ISR:
614 reg = regs.isr;
615 devIntrClear(ISR_ALL);
616 break;
617
618 case IMR:
619 reg = regs.imr;
620 break;
621
622 case IER:
623 reg = regs.ier;
624 break;
625
626 case IHR:
627 reg = regs.ihr;
628 break;
629
630 case TXDP:
631 reg = regs.txdp;
632 break;
633
634 case TXDP_HI:
635 reg = regs.txdp_hi;
636 break;
637
638 case TX_CFG:
639 reg = regs.txcfg;
640 break;
641
642 case GPIOR:
643 reg = regs.gpior;
644 break;
645
646 case RXDP:
647 reg = regs.rxdp;
648 break;
649
650 case RXDP_HI:
651 reg = regs.rxdp_hi;
652 break;
653
654 case RX_CFG:
655 reg = regs.rxcfg;
656 break;
657
658 case PQCR:
659 reg = regs.pqcr;
660 break;
661
662 case WCSR:
663 reg = regs.wcsr;
664 break;
665
666 case PCR:
667 reg = regs.pcr;
668 break;
669
670 // see the spec sheet for how RFCR and RFDR work
671 // basically, you write to RFCR to tell the machine
672 // what you want to do next, then you act upon RFDR,
673 // and the device will be prepared b/c of what you
674 // wrote to RFCR
675 case RFCR:
676 reg = regs.rfcr;
677 break;
678
679 case RFDR:
680 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
681 switch (rfaddr) {
682 // Read from perfect match ROM octets
683 case 0x000:
684 reg = rom.perfectMatch[1];
685 reg = reg << 8;
686 reg += rom.perfectMatch[0];
687 break;
688 case 0x002:
689 reg = rom.perfectMatch[3] << 8;
690 reg += rom.perfectMatch[2];
691 break;
692 case 0x004:
693 reg = rom.perfectMatch[5] << 8;
694 reg += rom.perfectMatch[4];
695 break;
696 default:
697 // Read filter hash table
698 if (rfaddr >= FHASH_ADDR &&
699 rfaddr < FHASH_ADDR + FHASH_SIZE) {
700
701 // Only word-aligned reads supported
702 if (rfaddr % 2)
703 panic("unaligned read from filter hash table!");
704
705 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
706 reg += rom.filterHash[rfaddr - FHASH_ADDR];
707 break;
708 }
709
710 panic("reading RFDR for something other than pattern"
711 " matching or hashing! %#x\n", rfaddr);
712 }
713 break;
714
715 case SRR:
716 reg = regs.srr;
717 break;
718
719 case MIBC:
720 reg = regs.mibc;
721 reg &= ~(MIBC_MIBS | MIBC_ACLR);
722 break;
723
724 case VRCR:
725 reg = regs.vrcr;
726 break;
727
728 case VTCR:
729 reg = regs.vtcr;
730 break;
731
732 case VDR:
733 reg = regs.vdr;
734 break;
735
736 case CCSR:
737 reg = regs.ccsr;
738 break;
739
740 case TBICR:
741 reg = regs.tbicr;
742 break;
743
744 case TBISR:
745 reg = regs.tbisr;
746 break;
747
748 case TANAR:
749 reg = regs.tanar;
750 break;
751
752 case TANLPAR:
753 reg = regs.tanlpar;
754 break;
755
756 case TANER:
757 reg = regs.taner;
758 break;
759
760 case TESR:
761 reg = regs.tesr;
762 break;
763
764 case M5REG:
765 reg = 0;
766 if (params()->rx_thread)
767 reg |= M5REG_RX_THREAD;
768 if (params()->tx_thread)
769 reg |= M5REG_TX_THREAD;
770 if (params()->rss)
771 reg |= M5REG_RSS;
772 break;
773
774 default:
775 panic("reading unimplemented register: addr=%#x", daddr);
776 }
777
778 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
779 daddr, reg, reg);
780 }
781 break;
782
783 default:
784 panic("accessing register with invalid size: addr=%#x, size=%d",
785 daddr, req->size);
786 }
787
788 return NoFault;
789 }
790
791 Fault
792 NSGigE::write(MemReqPtr &req, const uint8_t *data)
793 {
794 assert(ioEnable);
795
796 Addr daddr = req->paddr & 0xfff;
797 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
798 daddr, req->paddr, req->vaddr, req->size);
799
800 if (daddr > LAST && daddr <= RESERVED) {
801 panic("Accessing reserved register");
802 } else if (daddr > RESERVED && daddr <= 0x3FC) {
803 writeConfig(daddr & 0xff, req->size, data);
804 return NoFault;
805 } else if (daddr > 0x3FC)
806 panic("Something is messed up!\n");
807
808 if (req->size == sizeof(uint32_t)) {
809 uint32_t reg = *(uint32_t *)data;
810 uint16_t rfaddr;
811
812 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
813
814 switch (daddr) {
815 case CR:
816 regs.command = reg;
817 if (reg & CR_TXD) {
818 txEnable = false;
819 } else if (reg & CR_TXE) {
820 txEnable = true;
821
822 // the kernel is enabling the transmit machine
823 if (txState == txIdle)
824 txKick();
825 }
826
827 if (reg & CR_RXD) {
828 rxEnable = false;
829 } else if (reg & CR_RXE) {
830 rxEnable = true;
831
832 if (rxState == rxIdle)
833 rxKick();
834 }
835
836 if (reg & CR_TXR)
837 txReset();
838
839 if (reg & CR_RXR)
840 rxReset();
841
842 if (reg & CR_SWI)
843 devIntrPost(ISR_SWI);
844
845 if (reg & CR_RST) {
846 txReset();
847 rxReset();
848
849 regsReset();
850 }
851 break;
852
853 case CFGR:
854 if (reg & CFGR_LNKSTS ||
855 reg & CFGR_SPDSTS ||
856 reg & CFGR_DUPSTS ||
857 reg & CFGR_RESERVED ||
858 reg & CFGR_T64ADDR ||
859 reg & CFGR_PCI64_DET)
860
861 // First clear all writable bits
862 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
863 CFGR_RESERVED | CFGR_T64ADDR |
864 CFGR_PCI64_DET;
865 // Now set the appropriate writable bits
866 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
867 CFGR_RESERVED | CFGR_T64ADDR |
868 CFGR_PCI64_DET);
869
870 // all these #if 0's are because i don't THINK the kernel needs to
871 // have these implemented. if there is a problem relating to one of
872 // these, you may need to add functionality in.
873 if (reg & CFGR_TBI_EN) ;
874 if (reg & CFGR_MODE_1000) ;
875
876 if (reg & CFGR_AUTO_1000)
877 panic("CFGR_AUTO_1000 not implemented!\n");
878
879 if (reg & CFGR_PINT_DUPSTS ||
880 reg & CFGR_PINT_LNKSTS ||
881 reg & CFGR_PINT_SPDSTS)
882 ;
883
884 if (reg & CFGR_TMRTEST) ;
885 if (reg & CFGR_MRM_DIS) ;
886 if (reg & CFGR_MWI_DIS) ;
887
888 if (reg & CFGR_T64ADDR) ;
889 // panic("CFGR_T64ADDR is read only register!\n");
890
891 if (reg & CFGR_PCI64_DET)
892 panic("CFGR_PCI64_DET is read only register!\n");
893
894 if (reg & CFGR_DATA64_EN) ;
895 if (reg & CFGR_M64ADDR) ;
896 if (reg & CFGR_PHY_RST) ;
897 if (reg & CFGR_PHY_DIS) ;
898
899 if (reg & CFGR_EXTSTS_EN)
900 extstsEnable = true;
901 else
902 extstsEnable = false;
903
904 if (reg & CFGR_REQALG) ;
905 if (reg & CFGR_SB) ;
906 if (reg & CFGR_POW) ;
907 if (reg & CFGR_EXD) ;
908 if (reg & CFGR_PESEL) ;
909 if (reg & CFGR_BROM_DIS) ;
910 if (reg & CFGR_EXT_125) ;
911 if (reg & CFGR_BEM) ;
912 break;
913
914 case MEAR:
915 // Clear writable bits
916 regs.mear &= MEAR_EEDO;
917 // Set appropriate writable bits
918 regs.mear |= reg & ~MEAR_EEDO;
919
920 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
921 // even though it could get it through RFDR
922 if (reg & MEAR_EESEL) {
923 // Rising edge of clock
924 if (reg & MEAR_EECLK && !eepromClk)
925 eepromKick();
926 }
927 else {
928 eepromState = eepromStart;
929 regs.mear &= ~MEAR_EEDI;
930 }
931
932 eepromClk = reg & MEAR_EECLK;
933
934 // since phy is completely faked, MEAR_MD* don't matter
935 if (reg & MEAR_MDIO) ;
936 if (reg & MEAR_MDDIR) ;
937 if (reg & MEAR_MDC) ;
938 break;
939
940 case PTSCR:
941 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
942 // these control BISTs for various parts of chip - we
943 // don't care or do just fake that the BIST is done
944 if (reg & PTSCR_RBIST_EN)
945 regs.ptscr |= PTSCR_RBIST_DONE;
946 if (reg & PTSCR_EEBIST_EN)
947 regs.ptscr &= ~PTSCR_EEBIST_EN;
948 if (reg & PTSCR_EELOAD_EN)
949 regs.ptscr &= ~PTSCR_EELOAD_EN;
950 break;
951
952 case ISR: /* writing to the ISR has no effect */
953 panic("ISR is a read only register!\n");
954
955 case IMR:
956 regs.imr = reg;
957 devIntrChangeMask();
958 break;
959
960 case IER:
961 regs.ier = reg;
962 break;
963
964 case IHR:
965 regs.ihr = reg;
966 /* not going to implement real interrupt holdoff */
967 break;
968
969 case TXDP:
970 regs.txdp = (reg & 0xFFFFFFFC);
971 assert(txState == txIdle);
972 CTDD = false;
973 break;
974
975 case TXDP_HI:
976 regs.txdp_hi = reg;
977 break;
978
979 case TX_CFG:
980 regs.txcfg = reg;
981 #if 0
982 if (reg & TX_CFG_CSI) ;
983 if (reg & TX_CFG_HBI) ;
984 if (reg & TX_CFG_MLB) ;
985 if (reg & TX_CFG_ATP) ;
986 if (reg & TX_CFG_ECRETRY) {
987 /*
988 * this could easily be implemented, but considering
989 * the network is just a fake pipe, wouldn't make
990 * sense to do this
991 */
992 }
993
994 if (reg & TX_CFG_BRST_DIS) ;
995 #endif
996
997 #if 0
998 /* we handle our own DMA, ignore the kernel's exhortations */
999 if (reg & TX_CFG_MXDMA) ;
1000 #endif
1001
1002 // also, we currently don't care about fill/drain
1003 // thresholds though this may change in the future with
1004 // more realistic networks or a driver which changes it
1005 // according to feedback
1006
1007 break;
1008
1009 case GPIOR:
1010 // Only write writable bits
1011 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1012 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
1013 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1014 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
1015 /* these just control general purpose i/o pins, don't matter */
1016 break;
1017
1018 case RXDP:
1019 regs.rxdp = reg;
1020 CRDD = false;
1021 break;
1022
1023 case RXDP_HI:
1024 regs.rxdp_hi = reg;
1025 break;
1026
1027 case RX_CFG:
1028 regs.rxcfg = reg;
1029 #if 0
1030 if (reg & RX_CFG_AEP) ;
1031 if (reg & RX_CFG_ARP) ;
1032 if (reg & RX_CFG_STRIPCRC) ;
1033 if (reg & RX_CFG_RX_RD) ;
1034 if (reg & RX_CFG_ALP) ;
1035 if (reg & RX_CFG_AIRL) ;
1036
1037 /* we handle our own DMA, ignore what kernel says about it */
1038 if (reg & RX_CFG_MXDMA) ;
1039
1040 //also, we currently don't care about fill/drain thresholds
1041 //though this may change in the future with more realistic
1042 //networks or a driver which changes it according to feedback
1043 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1044 #endif
1045 break;
1046
1047 case PQCR:
1048 /* there is no priority queueing used in the linux 2.6 driver */
1049 regs.pqcr = reg;
1050 break;
1051
1052 case WCSR:
1053 /* not going to implement wake on LAN */
1054 regs.wcsr = reg;
1055 break;
1056
1057 case PCR:
1058 /* not going to implement pause control */
1059 regs.pcr = reg;
1060 break;
1061
1062 case RFCR:
1063 regs.rfcr = reg;
1064
1065 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1066 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1067 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1068 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1069 acceptPerfect = (reg & RFCR_APM) ? true : false;
1070 acceptArp = (reg & RFCR_AARP) ? true : false;
1071 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1072
1073 #if 0
1074 if (reg & RFCR_APAT)
1075 panic("RFCR_APAT not implemented!\n");
1076 #endif
1077 if (reg & RFCR_UHEN)
1078 panic("Unicast hash filtering not used by drivers!\n");
1079
1080 if (reg & RFCR_ULM)
1081 panic("RFCR_ULM not implemented!\n");
1082
1083 break;
1084
1085 case RFDR:
1086 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1087 switch (rfaddr) {
1088 case 0x000:
1089 rom.perfectMatch[0] = (uint8_t)reg;
1090 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1091 break;
1092 case 0x002:
1093 rom.perfectMatch[2] = (uint8_t)reg;
1094 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1095 break;
1096 case 0x004:
1097 rom.perfectMatch[4] = (uint8_t)reg;
1098 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1099 break;
1100 default:
1101
1102 if (rfaddr >= FHASH_ADDR &&
1103 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1104
1105 // Only word-aligned writes supported
1106 if (rfaddr % 2)
1107 panic("unaligned write to filter hash table!");
1108
1109 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1110 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1111 = (uint8_t)(reg >> 8);
1112 break;
1113 }
1114 panic("writing RFDR for something other than pattern matching\
1115 or hashing! %#x\n", rfaddr);
1116 }
1117
1118 case BRAR:
1119 regs.brar = reg;
1120 break;
1121
1122 case BRDR:
1123 panic("the driver never uses BRDR, something is wrong!\n");
1124
1125 case SRR:
1126 panic("SRR is read only register!\n");
1127
1128 case MIBC:
1129 panic("the driver never uses MIBC, something is wrong!\n");
1130
1131 case VRCR:
1132 regs.vrcr = reg;
1133 break;
1134
1135 case VTCR:
1136 regs.vtcr = reg;
1137 break;
1138
1139 case VDR:
1140 panic("the driver never uses VDR, something is wrong!\n");
1141
1142 case CCSR:
1143 /* not going to implement clockrun stuff */
1144 regs.ccsr = reg;
1145 break;
1146
1147 case TBICR:
1148 regs.tbicr = reg;
1149 if (reg & TBICR_MR_LOOPBACK)
1150 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1151
1152 if (reg & TBICR_MR_AN_ENABLE) {
1153 regs.tanlpar = regs.tanar;
1154 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1155 }
1156
1157 #if 0
1158 if (reg & TBICR_MR_RESTART_AN) ;
1159 #endif
1160
1161 break;
1162
1163 case TBISR:
1164 panic("TBISR is read only register!\n");
1165
1166 case TANAR:
1167 // Only write the writable bits
1168 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1169 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1170
1171 // Pause capability unimplemented
1172 #if 0
1173 if (reg & TANAR_PS2) ;
1174 if (reg & TANAR_PS1) ;
1175 #endif
1176
1177 break;
1178
1179 case TANLPAR:
1180 panic("this should only be written to by the fake phy!\n");
1181
1182 case TANER:
1183 panic("TANER is read only register!\n");
1184
1185 case TESR:
1186 regs.tesr = reg;
1187 break;
1188
1189 default:
1190 panic("invalid register access daddr=%#x", daddr);
1191 }
1192 } else {
1193 panic("Invalid Request Size");
1194 }
1195
1196 return NoFault;
1197 }
1198
1199 void
1200 NSGigE::devIntrPost(uint32_t interrupts)
1201 {
1202 if (interrupts & ISR_RESERVE)
1203 panic("Cannot set a reserved interrupt");
1204
1205 if (interrupts & ISR_NOIMPL)
1206 warn("interrupt not implemented %#x\n", interrupts);
1207
1208 interrupts &= ISR_IMPL;
1209 regs.isr |= interrupts;
1210
1211 if (interrupts & regs.imr) {
1212 if (interrupts & ISR_SWI) {
1213 totalSwi++;
1214 }
1215 if (interrupts & ISR_RXIDLE) {
1216 totalRxIdle++;
1217 }
1218 if (interrupts & ISR_RXOK) {
1219 totalRxOk++;
1220 }
1221 if (interrupts & ISR_RXDESC) {
1222 totalRxDesc++;
1223 }
1224 if (interrupts & ISR_TXOK) {
1225 totalTxOk++;
1226 }
1227 if (interrupts & ISR_TXIDLE) {
1228 totalTxIdle++;
1229 }
1230 if (interrupts & ISR_TXDESC) {
1231 totalTxDesc++;
1232 }
1233 if (interrupts & ISR_RXORN) {
1234 totalRxOrn++;
1235 }
1236 }
1237
1238 DPRINTF(EthernetIntr,
1239 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1240 interrupts, regs.isr, regs.imr);
1241
1242 if ((regs.isr & regs.imr)) {
1243 Tick when = curTick;
1244 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
1245 when += intrDelay;
1246 cpuIntrPost(when);
1247 }
1248 }
1249
1250 /* writing this interrupt counting stats inside this means that this function
1251 is now limited to being used to clear all interrupts upon the kernel
1252 reading isr and servicing. just telling you in case you were thinking
1253 of expanding use.
1254 */
1255 void
1256 NSGigE::devIntrClear(uint32_t interrupts)
1257 {
1258 if (interrupts & ISR_RESERVE)
1259 panic("Cannot clear a reserved interrupt");
1260
1261 if (regs.isr & regs.imr & ISR_SWI) {
1262 postedSwi++;
1263 }
1264 if (regs.isr & regs.imr & ISR_RXIDLE) {
1265 postedRxIdle++;
1266 }
1267 if (regs.isr & regs.imr & ISR_RXOK) {
1268 postedRxOk++;
1269 }
1270 if (regs.isr & regs.imr & ISR_RXDESC) {
1271 postedRxDesc++;
1272 }
1273 if (regs.isr & regs.imr & ISR_TXOK) {
1274 postedTxOk++;
1275 }
1276 if (regs.isr & regs.imr & ISR_TXIDLE) {
1277 postedTxIdle++;
1278 }
1279 if (regs.isr & regs.imr & ISR_TXDESC) {
1280 postedTxDesc++;
1281 }
1282 if (regs.isr & regs.imr & ISR_RXORN) {
1283 postedRxOrn++;
1284 }
1285
1286 if (regs.isr & regs.imr & ISR_IMPL)
1287 postedInterrupts++;
1288
1289 interrupts &= ~ISR_NOIMPL;
1290 regs.isr &= ~interrupts;
1291
1292 DPRINTF(EthernetIntr,
1293 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1294 interrupts, regs.isr, regs.imr);
1295
1296 if (!(regs.isr & regs.imr))
1297 cpuIntrClear();
1298 }
1299
1300 void
1301 NSGigE::devIntrChangeMask()
1302 {
1303 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1304 regs.isr, regs.imr, regs.isr & regs.imr);
1305
1306 if (regs.isr & regs.imr)
1307 cpuIntrPost(curTick);
1308 else
1309 cpuIntrClear();
1310 }
1311
void
NSGigE::cpuIntrPost(Tick when)
{
    // Schedule assertion of the CPU interrupt line at tick 'when',
    // collapsing redundant requests into an already-scheduled event.
    //
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    // intrTick == 0 means no interrupt event is currently scheduled.
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp: should be unreachable given the asserts above.
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any previously scheduled event with one at the new tick.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1346
1347 void
1348 NSGigE::cpuInterrupt()
1349 {
1350 assert(intrTick == curTick);
1351
1352 // Whether or not there's a pending interrupt, we don't care about
1353 // it anymore
1354 intrEvent = 0;
1355 intrTick = 0;
1356
1357 // Don't send an interrupt if there's already one
1358 if (cpuPendingIntr) {
1359 DPRINTF(EthernetIntr,
1360 "would send an interrupt now, but there's already pending\n");
1361 } else {
1362 // Send interrupt
1363 cpuPendingIntr = true;
1364
1365 DPRINTF(EthernetIntr, "posting interrupt\n");
1366 intrPost();
1367 }
1368 }
1369
1370 void
1371 NSGigE::cpuIntrClear()
1372 {
1373 if (!cpuPendingIntr)
1374 return;
1375
1376 if (intrEvent) {
1377 intrEvent->squash();
1378 intrEvent = 0;
1379 }
1380
1381 intrTick = 0;
1382
1383 cpuPendingIntr = false;
1384
1385 DPRINTF(EthernetIntr, "clearing interrupt\n");
1386 intrClear();
1387 }
1388
1389 bool
1390 NSGigE::cpuIntrPending() const
1391 { return cpuPendingIntr; }
1392
1393 void
1394 NSGigE::txReset()
1395 {
1396
1397 DPRINTF(Ethernet, "transmit reset\n");
1398
1399 CTDD = false;
1400 txEnable = false;;
1401 txFragPtr = 0;
1402 assert(txDescCnt == 0);
1403 txFifo.clear();
1404 txState = txIdle;
1405 assert(txDmaState == dmaIdle);
1406 }
1407
1408 void
1409 NSGigE::rxReset()
1410 {
1411 DPRINTF(Ethernet, "receive reset\n");
1412
1413 CRDD = false;
1414 assert(rxPktBytes == 0);
1415 rxEnable = false;
1416 rxFragPtr = 0;
1417 assert(rxDescCnt == 0);
1418 assert(rxDmaState == dmaIdle);
1419 rxFifo.clear();
1420 rxState = rxIdle;
1421 }
1422
void
NSGigE::regsReset()
{
    // Restore every device register to its hardware power-on default.
    // Registers not explicitly set below reset to zero via the memset.
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;   // power-on MEAR value -- presumably EEPROM/MII pin
                        // defaults, confirm against DP83820 datasheet
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ; // MIB counters frozen at reset
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    // Reset the cached receive-filter / extended-status configuration
    // derived from these registers.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1445
1446 void
1447 NSGigE::rxDmaReadCopy()
1448 {
1449 assert(rxDmaState == dmaReading);
1450
1451 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1452 rxDmaState = dmaIdle;
1453
1454 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1455 rxDmaAddr, rxDmaLen);
1456 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1457 }
1458
1459 bool
1460 NSGigE::doRxDmaRead()
1461 {
1462 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1463 rxDmaState = dmaReading;
1464
1465 if (dmaInterface && !rxDmaFree) {
1466 if (dmaInterface->busy())
1467 rxDmaState = dmaReadWaiting;
1468 else
1469 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1470 &rxDmaReadEvent, true);
1471 return true;
1472 }
1473
1474 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1475 rxDmaReadCopy();
1476 return false;
1477 }
1478
1479 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1480 Tick start = curTick + dmaReadDelay + factor;
1481 rxDmaReadEvent.schedule(start);
1482 return true;
1483 }
1484
1485 void
1486 NSGigE::rxDmaReadDone()
1487 {
1488 assert(rxDmaState == dmaReading);
1489 rxDmaReadCopy();
1490
1491 // If the transmit state machine has a pending DMA, let it go first
1492 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1493 txKick();
1494
1495 rxKick();
1496 }
1497
1498 void
1499 NSGigE::rxDmaWriteCopy()
1500 {
1501 assert(rxDmaState == dmaWriting);
1502
1503 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1504 rxDmaState = dmaIdle;
1505
1506 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1507 rxDmaAddr, rxDmaLen);
1508 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1509 }
1510
1511 bool
1512 NSGigE::doRxDmaWrite()
1513 {
1514 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1515 rxDmaState = dmaWriting;
1516
1517 if (dmaInterface && !rxDmaFree) {
1518 if (dmaInterface->busy())
1519 rxDmaState = dmaWriteWaiting;
1520 else
1521 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1522 &rxDmaWriteEvent, true);
1523 return true;
1524 }
1525
1526 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1527 rxDmaWriteCopy();
1528 return false;
1529 }
1530
1531 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1532 Tick start = curTick + dmaWriteDelay + factor;
1533 rxDmaWriteEvent.schedule(start);
1534 return true;
1535 }
1536
1537 void
1538 NSGigE::rxDmaWriteDone()
1539 {
1540 assert(rxDmaState == dmaWriting);
1541 rxDmaWriteCopy();
1542
1543 // If the transmit state machine has a pending DMA, let it go first
1544 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1545 txKick();
1546
1547 rxKick();
1548 }
1549
/**
 * Advance the receive-side descriptor state machine.  Runs until it
 * blocks on a pending DMA (goto exit) or, when clocked, until the next
 * state-machine tick; each completed state jumps back to "next".
 * Re-entered from the rx DMA completion callbacks.
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    // cmdsts/extsts alias whichever descriptor format (32/64-bit) is active.
    Addr link, bufptr;
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was previously blocked on a busy interface.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was done: re-fetch only its link field.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor at rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        // OWN set means the device (not the driver) already owns this
        // descriptor's result: nothing for us to fill in.
        if (cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            // Write back descriptor status: device done, no more frags,
            // OK, and the received length in the low 16 bits.
            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Verify IP/TCP/UDP checksums and report results in extsts.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // DMA cmdsts+extsts (adjacent in both descriptor layouts)
            // back to the in-memory descriptor.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // Follow the descriptor's link pointer; link == 0 ends the ring.
        if (link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1886
void
NSGigE::transmit()
{
    // Try to send the packet at the head of the transmit fifo out the
    // attached link; on success, update stats, pop it, and post TXOK.
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // More packets queued (or the send was refused): try again after
    // the link's retry interval.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}
1937
1938 void
1939 NSGigE::txDmaReadCopy()
1940 {
1941 assert(txDmaState == dmaReading);
1942
1943 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1944 txDmaState = dmaIdle;
1945
1946 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1947 txDmaAddr, txDmaLen);
1948 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1949 }
1950
1951 bool
1952 NSGigE::doTxDmaRead()
1953 {
1954 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1955 txDmaState = dmaReading;
1956
1957 if (dmaInterface && !txDmaFree) {
1958 if (dmaInterface->busy())
1959 txDmaState = dmaReadWaiting;
1960 else
1961 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1962 &txDmaReadEvent, true);
1963 return true;
1964 }
1965
1966 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1967 txDmaReadCopy();
1968 return false;
1969 }
1970
1971 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1972 Tick start = curTick + dmaReadDelay + factor;
1973 txDmaReadEvent.schedule(start);
1974 return true;
1975 }
1976
1977 void
1978 NSGigE::txDmaReadDone()
1979 {
1980 assert(txDmaState == dmaReading);
1981 txDmaReadCopy();
1982
1983 // If the receive state machine has a pending DMA, let it go first
1984 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1985 rxKick();
1986
1987 txKick();
1988 }
1989
1990 void
1991 NSGigE::txDmaWriteCopy()
1992 {
1993 assert(txDmaState == dmaWriting);
1994
1995 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1996 txDmaState = dmaIdle;
1997
1998 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1999 txDmaAddr, txDmaLen);
2000 DDUMP(EthernetDMA, txDmaData, txDmaLen);
2001 }
2002
2003 bool
2004 NSGigE::doTxDmaWrite()
2005 {
2006 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
2007 txDmaState = dmaWriting;
2008
2009 if (dmaInterface && !txDmaFree) {
2010 if (dmaInterface->busy())
2011 txDmaState = dmaWriteWaiting;
2012 else
2013 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
2014 &txDmaWriteEvent, true);
2015 return true;
2016 }
2017
2018 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
2019 txDmaWriteCopy();
2020 return false;
2021 }
2022
2023 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
2024 Tick start = curTick + dmaWriteDelay + factor;
2025 txDmaWriteEvent.schedule(start);
2026 return true;
2027 }
2028
2029 void
2030 NSGigE::txDmaWriteDone()
2031 {
2032 assert(txDmaState == dmaWriting);
2033 txDmaWriteCopy();
2034
2035 // If the receive state machine has a pending DMA, let it go first
2036 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
2037 rxKick();
2038
2039 txKick();
2040 }
2041
2042 void
2043 NSGigE::txKick()
2044 {
2045 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
2046
2047 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
2048 NsTxStateStrings[txState], is64bit ? 64 : 32);
2049
2050 Addr link, bufptr;
2051 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
2052 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
2053
2054 next:
2055 if (clock) {
2056 if (txKickTick > curTick) {
2057 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
2058 txKickTick);
2059 goto exit;
2060 }
2061
2062 // Go to the next state machine clock tick.
2063 txKickTick = curTick + cycles(1);
2064 }
2065
2066 switch(txDmaState) {
2067 case dmaReadWaiting:
2068 if (doTxDmaRead())
2069 goto exit;
2070 break;
2071 case dmaWriteWaiting:
2072 if (doTxDmaWrite())
2073 goto exit;
2074 break;
2075 default:
2076 break;
2077 }
2078
2079 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
2080 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
2081 switch (txState) {
2082 case txIdle:
2083 if (!txEnable) {
2084 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
2085 goto exit;
2086 }
2087
2088 if (CTDD) {
2089 txState = txDescRefr;
2090
2091 txDmaAddr = regs.txdp & 0x3fffffff;
2092 txDmaData =
2093 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
2094 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
2095 txDmaFree = dmaDescFree;
2096
2097 descDmaReads++;
2098 descDmaRdBytes += txDmaLen;
2099
2100 if (doTxDmaRead())
2101 goto exit;
2102
2103 } else {
2104 txState = txDescRead;
2105
2106 txDmaAddr = regs.txdp & 0x3fffffff;
2107 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2108 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2109 txDmaFree = dmaDescFree;
2110
2111 descDmaReads++;
2112 descDmaRdBytes += txDmaLen;
2113
2114 if (doTxDmaRead())
2115 goto exit;
2116 }
2117 break;
2118
2119 case txDescRefr:
2120 if (txDmaState != dmaIdle)
2121 goto exit;
2122
2123 txState = txAdvance;
2124 break;
2125
2126 case txDescRead:
2127 if (txDmaState != dmaIdle)
2128 goto exit;
2129
2130 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
2131 regs.txdp & 0x3fffffff);
2132 DPRINTF(EthernetDesc,
2133 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
2134 link, bufptr, cmdsts, extsts);
2135
2136 if (cmdsts & CMDSTS_OWN) {
2137 txState = txFifoBlock;
2138 txFragPtr = bufptr;
2139 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
2140 } else {
2141 devIntrPost(ISR_TXIDLE);
2142 txState = txIdle;
2143 goto exit;
2144 }
2145 break;
2146
2147 case txFifoBlock:
2148 if (!txPacket) {
2149 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2150 txPacket = new PacketData(16384);
2151 txPacketBufPtr = txPacket->data;
2152 }
2153
2154 if (txDescCnt == 0) {
2155 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2156 if (cmdsts & CMDSTS_MORE) {
2157 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2158 txState = txDescWrite;
2159
2160 cmdsts &= ~CMDSTS_OWN;
2161
2162 txDmaAddr = regs.txdp & 0x3fffffff;
2163 txDmaData = &cmdsts;
2164 if (is64bit) {
2165 txDmaAddr += offsetof(ns_desc64, cmdsts);
2166 txDmaLen = sizeof(txDesc64.cmdsts);
2167 } else {
2168 txDmaAddr += offsetof(ns_desc32, cmdsts);
2169 txDmaLen = sizeof(txDesc32.cmdsts);
2170 }
2171 txDmaFree = dmaDescFree;
2172
2173 if (doTxDmaWrite())
2174 goto exit;
2175
2176 } else { /* this packet is totally done */
2177 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2178 /* deal with the the packet that just finished */
2179 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2180 IpPtr ip(txPacket);
2181 if (extsts & EXTSTS_UDPPKT) {
2182 UdpPtr udp(ip);
2183 udp->sum(0);
2184 udp->sum(cksum(udp));
2185 txUdpChecksums++;
2186 } else if (extsts & EXTSTS_TCPPKT) {
2187 TcpPtr tcp(ip);
2188 tcp->sum(0);
2189 tcp->sum(cksum(tcp));
2190 txTcpChecksums++;
2191 }
2192 if (extsts & EXTSTS_IPPKT) {
2193 ip->sum(0);
2194 ip->sum(cksum(ip));
2195 txIpChecksums++;
2196 }
2197 }
2198
2199 txPacket->length = txPacketBufPtr - txPacket->data;
2200 // this is just because the receive can't handle a
2201 // packet bigger want to make sure
2202 if (txPacket->length > 1514)
2203 panic("transmit packet too large, %s > 1514\n",
2204 txPacket->length);
2205
2206 #ifndef NDEBUG
2207 bool success =
2208 #endif
2209 txFifo.push(txPacket);
2210 assert(success);
2211
2212 /*
2213 * this following section is not tqo spec, but
2214 * functionally shouldn't be any different. normally,
2215 * the chip will wait til the transmit has occurred
2216 * before writing back the descriptor because it has
2217 * to wait to see that it was successfully transmitted
2218 * to decide whether to set CMDSTS_OK or not.
2219 * however, in the simulator since it is always
2220 * successfully transmitted, and writing it exactly to
2221 * spec would complicate the code, we just do it here
2222 */
2223
2224 cmdsts &= ~CMDSTS_OWN;
2225 cmdsts |= CMDSTS_OK;
2226
2227 DPRINTF(EthernetDesc,
2228 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2229 cmdsts, extsts);
2230
2231 txDmaFree = dmaDescFree;
2232 txDmaAddr = regs.txdp & 0x3fffffff;
2233 txDmaData = &cmdsts;
2234 if (is64bit) {
2235 txDmaAddr += offsetof(ns_desc64, cmdsts);
2236 txDmaLen =
2237 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2238 } else {
2239 txDmaAddr += offsetof(ns_desc32, cmdsts);
2240 txDmaLen =
2241 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2242 }
2243
2244 descDmaWrites++;
2245 descDmaWrBytes += txDmaLen;
2246
2247 transmit();
2248 txPacket = 0;
2249
2250 if (!txEnable) {
2251 DPRINTF(EthernetSM, "halting TX state machine\n");
2252 txState = txIdle;
2253 goto exit;
2254 } else
2255 txState = txAdvance;
2256
2257 if (doTxDmaWrite())
2258 goto exit;
2259 }
2260 } else {
2261 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2262 if (!txFifo.full()) {
2263 txState = txFragRead;
2264
2265 /*
2266 * The number of bytes transferred is either whatever
2267 * is left in the descriptor (txDescCnt), or if there
2268 * is not enough room in the fifo, just whatever room
2269 * is left in the fifo
2270 */
2271 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2272
2273 txDmaAddr = txFragPtr & 0x3fffffff;
2274 txDmaData = txPacketBufPtr;
2275 txDmaLen = txXferLen;
2276 txDmaFree = dmaDataFree;
2277
2278 if (doTxDmaRead())
2279 goto exit;
2280 } else {
2281 txState = txFifoBlock;
2282 transmit();
2283
2284 goto exit;
2285 }
2286
2287 }
2288 break;
2289
2290 case txFragRead:
2291 if (txDmaState != dmaIdle)
2292 goto exit;
2293
2294 txPacketBufPtr += txXferLen;
2295 txFragPtr += txXferLen;
2296 txDescCnt -= txXferLen;
2297 txFifo.reserve(txXferLen);
2298
2299 txState = txFifoBlock;
2300 break;
2301
2302 case txDescWrite:
2303 if (txDmaState != dmaIdle)
2304 goto exit;
2305
2306 if (cmdsts & CMDSTS_INTR)
2307 devIntrPost(ISR_TXDESC);
2308
2309 if (!txEnable) {
2310 DPRINTF(EthernetSM, "halting TX state machine\n");
2311 txState = txIdle;
2312 goto exit;
2313 } else
2314 txState = txAdvance;
2315 break;
2316
2317 case txAdvance:
2318 if (link == 0) {
2319 devIntrPost(ISR_TXIDLE);
2320 txState = txIdle;
2321 goto exit;
2322 } else {
2323 if (txDmaState != dmaIdle)
2324 goto exit;
2325 txState = txDescRead;
2326 regs.txdp = link;
2327 CTDD = false;
2328
2329 txDmaAddr = link & 0x3fffffff;
2330 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2331 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2332 txDmaFree = dmaDescFree;
2333
2334 if (doTxDmaRead())
2335 goto exit;
2336 }
2337 break;
2338
2339 default:
2340 panic("invalid state");
2341 }
2342
2343 DPRINTF(EthernetSM, "entering next txState=%s\n",
2344 NsTxStateStrings[txState]);
2345 goto next;
2346
2347 exit:
2348 /**
2349 * @todo do we want to schedule a future kick?
2350 */
2351 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2352 NsTxStateStrings[txState]);
2353
2354 if (clock && !txKickEvent.scheduled())
2355 txKickEvent.schedule(txKickTick);
2356 }
2357
2358 /**
2359 * Advance the EEPROM state machine
2360 * Called on rising edge of EEPROM clock bit in MEAR
2361 */
void
NSGigE::eepromKick()
{
    // Bit-serial EEPROM protocol: each call corresponds to one clock
    // edge; bits arrive on MEAR_EEDI and leave on MEAR_EEDO, MSB first.
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift-accumulate the opcode, MSB first.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift-accumulate the 6-bit word address, MSB first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the three words holding the perfect-match MAC
            // address are backed; each word packs two address bytes.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        // Shift the 16-bit word out, MSB first.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2458
2459 void
2460 NSGigE::transferDone()
2461 {
2462 if (txFifo.empty()) {
2463 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2464 return;
2465 }
2466
2467 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2468
2469 if (txEvent.scheduled())
2470 txEvent.reschedule(curTick + cycles(1));
2471 else
2472 txEvent.schedule(curTick + cycles(1));
2473 }
2474
2475 bool
2476 NSGigE::rxFilter(const PacketPtr &packet)
2477 {
2478 EthPtr eth = packet;
2479 bool drop = true;
2480 string type;
2481
2482 const EthAddr &dst = eth->dst();
2483 if (dst.unicast()) {
2484 // If we're accepting all unicast addresses
2485 if (acceptUnicast)
2486 drop = false;
2487
2488 // If we make a perfect match
2489 if (acceptPerfect && dst == rom.perfectMatch)
2490 drop = false;
2491
2492 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2493 drop = false;
2494
2495 } else if (dst.broadcast()) {
2496 // if we're accepting broadcasts
2497 if (acceptBroadcast)
2498 drop = false;
2499
2500 } else if (dst.multicast()) {
2501 // if we're accepting all multicasts
2502 if (acceptMulticast)
2503 drop = false;
2504
2505 // Multicast hashing faked - all packets accepted
2506 if (multicastHashEnable)
2507 drop = false;
2508 }
2509
2510 if (drop) {
2511 DPRINTF(Ethernet, "rxFilter drop\n");
2512 DDUMP(EthernetData, packet->data, packet->length);
2513 }
2514
2515 return drop;
2516 }
2517
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // Entry point for packets arriving from the attached link.
    // Returns true when the packet was consumed (accepted or
    // deliberately dropped); false when it couldn't fit in the rx fifo
    // -- presumably so the link can retry; confirm against EtherLink.
    //
    // Note: rx stats count the packet even if it is dropped below.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
                "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        // Fifo overflow: count it and raise the rx-overrun interrupt.
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    // Kick the receive state machine to start processing the packet.
    rxKick();
    return true;
}
2566
2567 //=====================================================================
2568 //
2569 //
void
NSGigE::serialize(ostream &os)
{
    // Write the complete device state to a checkpoint stream.  The
    // field order here is the checkpoint format: unserialize() below
    // must read everything back in exactly this order.

    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Completing any in-flight DMA copy up front means no partially
    // transferred DMA state has to be represented in the checkpoint.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // Receive-filter ROM contents: the perfect-match station address
    // and the multicast filter hash table.
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // A bool records whether a tx packet was being assembled; its data
    // and the buffer-pointer offset follow only when present.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // NOTE(review): mutates txPacket during serialization so only
        // the bytes accumulated so far are written out.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Save the buffer pointer as an offset -- a raw pointer is
        // meaningless across checkpoint/restore.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum values are widened through local ints so the checkpoint
    // does not depend on the enum's underlying representation.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick; zero is the "nothing pending" sentinel.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // Zero means no interrupt event is scheduled.  NOTE(review): unlike
    // transmitTick this is an absolute tick, not curTick-relative.
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2748
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore the device state from a checkpoint.  Reads must happen in
    // exactly the order the fields were written by serialize() above.

    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Allocate a generously sized buffer; the actual length comes
        // from the unserialized packet data.
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        // Rebuild the raw buffer pointer from the checkpointed offset.
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;  // NOTE(review): redundant -- both branches below assign it
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // State machine enums were checkpointed as ints; cast them back.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A nonzero kick tick means the tx state machine had work pending;
    // reschedule it at the saved (absolute) tick.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was saved relative to the old curTick, so re-anchor
    // it to the current tick on restore.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        // Recreate the interrupt event at its saved absolute tick.
        // NOTE(review): the `true` ctor argument presumably marks the
        // event for auto-delete -- confirm against IntrEvent.
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    // The bus bridge address ranges are not checkpointed, so re-register
    // the device's BAR-mapped ranges with the PIO interface here.
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2937
2938 Tick
2939 NSGigE::cacheAccess(MemReqPtr &req)
2940 {
2941 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2942 req->paddr, req->paddr & 0xfff);
2943
2944 return curTick + pioLatency;
2945 }
2946
// Simulator-configuration parameters for an NSGigEInt: the Ethernet
// peer it links to and the NSGigE device the interface belongs to.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    // peer defaults to NULL; the factory below only wires up the link
    // when a peer is actually supplied.
    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2960
2961 CREATE_SIM_OBJECT(NSGigEInt)
2962 {
2963 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2964
2965 EtherInt *p = (EtherInt *)peer;
2966 if (p) {
2967 dev_int->setPeer(p);
2968 p->setPeer(dev_int);
2969 }
2970
2971 return dev_int;
2972 }
2973
// Register the "NSGigEInt" type name with the simulator object factory.
REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2975
2976
// Simulator-configuration parameters for the NSGigE device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // State machine clock.
    Param<Tick> clock;

    // PCI identity and memory-system plumbing.
    Param<Addr> addr;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // Bus attachments and DMA/PIO timing knobs.
    SimObjectParam<HierParams *> hier;
    SimObjectParam<Bus*> pio_bus;
    SimObjectParam<Bus*> dma_bus;
    SimObjectParam<Bus*> payload_bus;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> pio_latency;
    Param<Tick> intr_delay;

    // Ethernet-side delays and FIFO sizing.
    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> tx_fifo_size;

    // Receive filtering, station address, and threading/RSS options.
    Param<bool> rx_filter;
    Param<string> hardware_address;
    Param<bool> rx_thread;
    Param<bool> tx_thread;
    Param<bool> rss;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
3017
// Parameter initialization; entries must stay in the same order as the
// declaration block above.
// NOTE(review): several help strings below (pio_bus, dma_bus, rx_thread,
// tx_thread, rss) are empty and could use real descriptions.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(clock, "State machine processor frequency"),

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),

    INIT_PARAM(hier, "Hierarchy global variables"),
    INIT_PARAM(pio_bus, ""),
    INIT_PARAM(dma_bus, ""),
    INIT_PARAM(payload_bus, "The IO Bus to attach to for payload"),
    INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
    INIT_PARAM(dma_data_free, "DMA of Data is free"),
    INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
    INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
    INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
    INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
    INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
    INIT_PARAM(pio_latency, "Programmed IO latency in bus cycles"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),

    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
    INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),

    INIT_PARAM(rx_filter, "Enable Receive Filter"),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM(rx_thread, ""),
    INIT_PARAM(tx_thread, ""),
    INIT_PARAM(rss, "")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
3058
3059
3060 CREATE_SIM_OBJECT(NSGigE)
3061 {
3062 NSGigE::Params *params = new NSGigE::Params;
3063
3064 params->name = getInstanceName();
3065
3066 params->clock = clock;
3067
3068 params->mmu = mmu;
3069 params->pmem = physmem;
3070 params->configSpace = configspace;
3071 params->configData = configdata;
3072 params->plat = platform;
3073 params->busNum = pci_bus;
3074 params->deviceNum = pci_dev;
3075 params->functionNum = pci_func;
3076
3077 params->hier = hier;
3078 params->pio_bus = pio_bus;
3079 params->header_bus = dma_bus;
3080 params->payload_bus = payload_bus;
3081 params->dma_desc_free = dma_desc_free;
3082 params->dma_data_free = dma_data_free;
3083 params->dma_read_delay = dma_read_delay;
3084 params->dma_write_delay = dma_write_delay;
3085 params->dma_read_factor = dma_read_factor;
3086 params->dma_write_factor = dma_write_factor;
3087 params->dma_no_allocate = dma_no_allocate;
3088 params->pio_latency = pio_latency;
3089 params->intr_delay = intr_delay;
3090
3091 params->rx_delay = rx_delay;
3092 params->tx_delay = tx_delay;
3093 params->rx_fifo_size = rx_fifo_size;
3094 params->tx_fifo_size = tx_fifo_size;
3095
3096 params->rx_filter = rx_filter;
3097 params->eaddr = hardware_address;
3098 params->rx_thread = rx_thread;
3099 params->tx_thread = tx_thread;
3100 params->rss = rss;
3101
3102 return new NSGigE(params);
3103 }
3104
// Register the "NSGigE" type name with the simulator object factory.
REGISTER_SIM_OBJECT("NSGigE", NSGigE)