1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
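/*
 * Reader's guide (added commentary, not part of the original header):
 * PIO register accesses are handled by read() and write(), interrupt
 * posting and clearing by devIntrPost()/devIntrClear() and the cpuIntr*
 * helpers, and the receive and transmit engines by the rxKick() and
 * txKick() state machines, whose states are named by the string tables
 * below.
 */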
33 #include <deque>
34 #include <string>
35
36 #include "arch/alpha/ev5.hh"
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/packet.hh"
43 #include "sim/builder.hh"
44 #include "sim/debug.hh"
45 #include "sim/host.hh"
46 #include "sim/stats.hh"
47 #include "sim/system.hh"
48
49 const char *NsRxStateStrings[] =
50 {
51 "rxIdle",
52 "rxDescRefr",
53 "rxDescRead",
54 "rxFifoBlock",
55 "rxFragWrite",
56 "rxDescWrite",
57 "rxAdvance"
58 };
59
60 const char *NsTxStateStrings[] =
61 {
62 "txIdle",
63 "txDescRefr",
64 "txDescRead",
65 "txFifoBlock",
66 "txFragRead",
67 "txDescWrite",
68 "txAdvance"
69 };
70
71 const char *NsDmaState[] =
72 {
73 "dmaIdle",
74 "dmaReading",
75 "dmaWriting",
76 "dmaReadWaiting",
77 "dmaWriteWaiting"
78 };
79
80 using namespace std;
81 using namespace Net;
82 using namespace TheISA;
83
84 ///////////////////////////////////////////////////////////////////////
85 //
86 // NSGigE PCI Device
87 //
88 NSGigE::NSGigE(Params *p)
89 : PciDev(p), ioEnable(false),
90 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
91 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
92 txXferLen(0), rxXferLen(0), clock(p->clock),
93 txState(txIdle), txEnable(false), CTDD(false),
94 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
95 rxEnable(false), CRDD(false), rxPktBytes(0),
96 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
97 eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
98 txDmaReadEvent(this), txDmaWriteEvent(this),
99 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
100 txDelay(p->tx_delay), rxDelay(p->rx_delay),
101 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
102 txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
103 acceptMulticast(false), acceptUnicast(false),
104 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
105 intrTick(0), cpuPendingIntr(false),
106 intrEvent(0), interface(0)
107 {
108
109 intrDelay = p->intr_delay;
110 dmaReadDelay = p->dma_read_delay;
111 dmaWriteDelay = p->dma_write_delay;
112 dmaReadFactor = p->dma_read_factor;
113 dmaWriteFactor = p->dma_write_factor;
114
115 regsReset();
116 memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
117
118 memset(&rxDesc32, 0, sizeof(rxDesc32));
119 memset(&txDesc32, 0, sizeof(txDesc32));
120 memset(&rxDesc64, 0, sizeof(rxDesc64));
121 memset(&txDesc64, 0, sizeof(txDesc64));
122 }
123
124 NSGigE::~NSGigE()
125 {}
126
127 void
128 NSGigE::regStats()
129 {
130 txBytes
131 .name(name() + ".txBytes")
132 .desc("Bytes Transmitted")
133 .prereq(txBytes)
134 ;
135
136 rxBytes
137 .name(name() + ".rxBytes")
138 .desc("Bytes Received")
139 .prereq(rxBytes)
140 ;
141
142 txPackets
143 .name(name() + ".txPackets")
144 .desc("Number of Packets Transmitted")
145 .prereq(txBytes)
146 ;
147
148 rxPackets
149 .name(name() + ".rxPackets")
150 .desc("Number of Packets Received")
151 .prereq(rxBytes)
152 ;
153
154 txIpChecksums
155 .name(name() + ".txIpChecksums")
156 .desc("Number of tx IP Checksums done by device")
157 .precision(0)
158 .prereq(txBytes)
159 ;
160
161 rxIpChecksums
162 .name(name() + ".rxIpChecksums")
163 .desc("Number of rx IP Checksums done by device")
164 .precision(0)
165 .prereq(rxBytes)
166 ;
167
168 txTcpChecksums
169 .name(name() + ".txTcpChecksums")
170 .desc("Number of tx TCP Checksums done by device")
171 .precision(0)
172 .prereq(txBytes)
173 ;
174
175 rxTcpChecksums
176 .name(name() + ".rxTcpChecksums")
177 .desc("Number of rx TCP Checksums done by device")
178 .precision(0)
179 .prereq(rxBytes)
180 ;
181
182 txUdpChecksums
183 .name(name() + ".txUdpChecksums")
184 .desc("Number of tx UDP Checksums done by device")
185 .precision(0)
186 .prereq(txBytes)
187 ;
188
189 rxUdpChecksums
190 .name(name() + ".rxUdpChecksums")
191 .desc("Number of rx UDP Checksums done by device")
192 .precision(0)
193 .prereq(rxBytes)
194 ;
195
196 descDmaReads
197 .name(name() + ".descDMAReads")
198 .desc("Number of descriptors the device read w/ DMA")
199 .precision(0)
200 ;
201
202 descDmaWrites
203 .name(name() + ".descDMAWrites")
204 .desc("Number of descriptors the device wrote w/ DMA")
205 .precision(0)
206 ;
207
208 descDmaRdBytes
209 .name(name() + ".descDmaReadBytes")
210 .desc("number of descriptor bytes read w/ DMA")
211 .precision(0)
212 ;
213
214 descDmaWrBytes
215 .name(name() + ".descDmaWriteBytes")
216             .desc("number of descriptor bytes written w/ DMA")
217 .precision(0)
218 ;
219
220 txBandwidth
221 .name(name() + ".txBandwidth")
222 .desc("Transmit Bandwidth (bits/s)")
223 .precision(0)
224 .prereq(txBytes)
225 ;
226
227 rxBandwidth
228 .name(name() + ".rxBandwidth")
229 .desc("Receive Bandwidth (bits/s)")
230 .precision(0)
231 .prereq(rxBytes)
232 ;
233
234 totBandwidth
235 .name(name() + ".totBandwidth")
236 .desc("Total Bandwidth (bits/s)")
237 .precision(0)
238 .prereq(totBytes)
239 ;
240
241 totPackets
242 .name(name() + ".totPackets")
243 .desc("Total Packets")
244 .precision(0)
245 .prereq(totBytes)
246 ;
247
248 totBytes
249 .name(name() + ".totBytes")
250 .desc("Total Bytes")
251 .precision(0)
252 .prereq(totBytes)
253 ;
254
255 totPacketRate
256 .name(name() + ".totPPS")
257         .desc("Total Transmission Rate (packets/s)")
258 .precision(0)
259 .prereq(totBytes)
260 ;
261
262 txPacketRate
263 .name(name() + ".txPPS")
264         .desc("Packet Transmission Rate (packets/s)")
265 .precision(0)
266 .prereq(txBytes)
267 ;
268
269 rxPacketRate
270 .name(name() + ".rxPPS")
271 .desc("Packet Reception Rate (packets/s)")
272 .precision(0)
273 .prereq(rxBytes)
274 ;
275
276 postedSwi
277 .name(name() + ".postedSwi")
278 .desc("number of software interrupts posted to CPU")
279 .precision(0)
280 ;
281
282 totalSwi
283 .name(name() + ".totalSwi")
284 .desc("total number of Swi written to ISR")
285 .precision(0)
286 ;
287
288 coalescedSwi
289 .name(name() + ".coalescedSwi")
290 .desc("average number of Swi's coalesced into each post")
291 .precision(0)
292 ;
293
294 postedRxIdle
295 .name(name() + ".postedRxIdle")
296 .desc("number of rxIdle interrupts posted to CPU")
297 .precision(0)
298 ;
299
300 totalRxIdle
301 .name(name() + ".totalRxIdle")
302 .desc("total number of RxIdle written to ISR")
303 .precision(0)
304 ;
305
306 coalescedRxIdle
307 .name(name() + ".coalescedRxIdle")
308 .desc("average number of RxIdle's coalesced into each post")
309 .precision(0)
310 ;
311
312 postedRxOk
313 .name(name() + ".postedRxOk")
314 .desc("number of RxOk interrupts posted to CPU")
315 .precision(0)
316 ;
317
318 totalRxOk
319 .name(name() + ".totalRxOk")
320 .desc("total number of RxOk written to ISR")
321 .precision(0)
322 ;
323
324 coalescedRxOk
325 .name(name() + ".coalescedRxOk")
326 .desc("average number of RxOk's coalesced into each post")
327 .precision(0)
328 ;
329
330 postedRxDesc
331 .name(name() + ".postedRxDesc")
332 .desc("number of RxDesc interrupts posted to CPU")
333 .precision(0)
334 ;
335
336 totalRxDesc
337 .name(name() + ".totalRxDesc")
338 .desc("total number of RxDesc written to ISR")
339 .precision(0)
340 ;
341
342 coalescedRxDesc
343 .name(name() + ".coalescedRxDesc")
344 .desc("average number of RxDesc's coalesced into each post")
345 .precision(0)
346 ;
347
348 postedTxOk
349 .name(name() + ".postedTxOk")
350 .desc("number of TxOk interrupts posted to CPU")
351 .precision(0)
352 ;
353
354 totalTxOk
355 .name(name() + ".totalTxOk")
356 .desc("total number of TxOk written to ISR")
357 .precision(0)
358 ;
359
360 coalescedTxOk
361 .name(name() + ".coalescedTxOk")
362 .desc("average number of TxOk's coalesced into each post")
363 .precision(0)
364 ;
365
366 postedTxIdle
367 .name(name() + ".postedTxIdle")
368 .desc("number of TxIdle interrupts posted to CPU")
369 .precision(0)
370 ;
371
372 totalTxIdle
373 .name(name() + ".totalTxIdle")
374 .desc("total number of TxIdle written to ISR")
375 .precision(0)
376 ;
377
378 coalescedTxIdle
379 .name(name() + ".coalescedTxIdle")
380 .desc("average number of TxIdle's coalesced into each post")
381 .precision(0)
382 ;
383
384 postedTxDesc
385 .name(name() + ".postedTxDesc")
386 .desc("number of TxDesc interrupts posted to CPU")
387 .precision(0)
388 ;
389
390 totalTxDesc
391 .name(name() + ".totalTxDesc")
392 .desc("total number of TxDesc written to ISR")
393 .precision(0)
394 ;
395
396 coalescedTxDesc
397 .name(name() + ".coalescedTxDesc")
398 .desc("average number of TxDesc's coalesced into each post")
399 .precision(0)
400 ;
401
402 postedRxOrn
403 .name(name() + ".postedRxOrn")
404 .desc("number of RxOrn posted to CPU")
405 .precision(0)
406 ;
407
408 totalRxOrn
409 .name(name() + ".totalRxOrn")
410 .desc("total number of RxOrn written to ISR")
411 .precision(0)
412 ;
413
414 coalescedRxOrn
415 .name(name() + ".coalescedRxOrn")
416 .desc("average number of RxOrn's coalesced into each post")
417 .precision(0)
418 ;
419
420 coalescedTotal
421 .name(name() + ".coalescedTotal")
422 .desc("average number of interrupts coalesced into each post")
423 .precision(0)
424 ;
425
426 postedInterrupts
427 .name(name() + ".postedInterrupts")
428 .desc("number of posts to CPU")
429 .precision(0)
430 ;
431
432 droppedPackets
433 .name(name() + ".droppedPackets")
434 .desc("number of packets dropped")
435 .precision(0)
436 ;
437
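    // The remaining statistics are formulas built from the scalars above;
    // they are evaluated when the stats are dumped, not updated at run time.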
438 coalescedSwi = totalSwi / postedInterrupts;
439 coalescedRxIdle = totalRxIdle / postedInterrupts;
440 coalescedRxOk = totalRxOk / postedInterrupts;
441 coalescedRxDesc = totalRxDesc / postedInterrupts;
442 coalescedTxOk = totalTxOk / postedInterrupts;
443 coalescedTxIdle = totalTxIdle / postedInterrupts;
444 coalescedTxDesc = totalTxDesc / postedInterrupts;
445 coalescedRxOrn = totalRxOrn / postedInterrupts;
446
447 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
448 totalTxOk + totalTxIdle + totalTxDesc +
449 totalRxOrn) / postedInterrupts;
450
451 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
452 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
453 totBandwidth = txBandwidth + rxBandwidth;
454 totBytes = txBytes + rxBytes;
455 totPackets = txPackets + rxPackets;
456
457 txPacketRate = txPackets / simSeconds;
458 rxPacketRate = rxPackets / simSeconds;
459 }
460
461
462 /**
463 * This is to write to the PCI general configuration registers
464 */
465 void
466 NSGigE::writeConfig(int offset, const uint16_t data)
467 {
468 if (offset < PCI_DEVICE_SPECIFIC)
469 PciDev::writeConfig(offset, data);
470 else
471 panic("Device specific PCI config space not implemented!\n");
472
473 switch (offset) {
474     // The device seems to work fine without most of these PCI settings,
475     // but we check the IO enable bit anyway; an assertion will fail if
476     // we ever need to implement it properly.
477 case PCI_COMMAND:
478 if (config.data[offset] & PCI_CMD_IOSE)
479 ioEnable = true;
480 else
481 ioEnable = false;
482 break;
483 }
484 }
485
486 /**
487 * This reads the device registers, which are detailed in the NS83820
488 * spec sheet
489 */
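/*
 * Address decode, as implemented below: offsets in (LAST, RESERVED] are
 * reserved and panic; offsets in (RESERVED, 0x3FC] alias the PCI config
 * space; the MIB counter range reads as zero; anything else must be a
 * 32-bit access to one of the device registers handled in the switch.
 */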
490 Tick
491 NSGigE::read(Packet *pkt)
492 {
493 assert(ioEnable);
494
495 pkt->time += pioDelay;
496 pkt->allocate();
497
498     // The mask gives you only the offset into the device register file
499 Addr daddr = pkt->addr & 0xfff;
500 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
501 daddr, pkt->addr, pkt->size);
502
503
504 // there are some reserved registers, you can see ns_gige_reg.h and
505 // the spec sheet for details
506 if (daddr > LAST && daddr <= RESERVED) {
507 panic("Accessing reserved register");
508 } else if (daddr > RESERVED && daddr <= 0x3FC) {
509 if (pkt->size == sizeof(uint8_t))
510 readConfig(daddr & 0xff, pkt->getPtr<uint8_t>());
511 if (pkt->size == sizeof(uint16_t))
512 readConfig(daddr & 0xff, pkt->getPtr<uint16_t>());
513 if (pkt->size == sizeof(uint32_t))
514 readConfig(daddr & 0xff, pkt->getPtr<uint32_t>());
515 pkt->result = Success;
516 return pioDelay;
517 } else if (daddr >= MIB_START && daddr <= MIB_END) {
518         // We don't implement the MIBs, which are just hardware
519         // statistics counters; hopefully the kernel doesn't actually
520         // depend upon their values.
521 pkt->set<uint32_t>(0);
522 pkt->result = Success;
523 return pioDelay;
524 } else if (daddr > 0x3FC)
525 panic("Something is messed up!\n");
526
527 assert(pkt->size == sizeof(uint32_t));
528 uint32_t &reg = *pkt->getPtr<uint32_t>();
529 uint16_t rfaddr;
530
531 switch (daddr) {
532 case CR:
533 reg = regs.command;
534 //these are supposed to be cleared on a read
535 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
536 break;
537
538 case CFGR:
539 reg = regs.config;
540 break;
541
542 case MEAR:
543 reg = regs.mear;
544 break;
545
546 case PTSCR:
547 reg = regs.ptscr;
548 break;
549
550 case ISR:
551 reg = regs.isr;
552 devIntrClear(ISR_ALL);
553 break;
554
555 case IMR:
556 reg = regs.imr;
557 break;
558
559 case IER:
560 reg = regs.ier;
561 break;
562
563 case IHR:
564 reg = regs.ihr;
565 break;
566
567 case TXDP:
568 reg = regs.txdp;
569 break;
570
571 case TXDP_HI:
572 reg = regs.txdp_hi;
573 break;
574
575 case TX_CFG:
576 reg = regs.txcfg;
577 break;
578
579 case GPIOR:
580 reg = regs.gpior;
581 break;
582
583 case RXDP:
584 reg = regs.rxdp;
585 break;
586
587 case RXDP_HI:
588 reg = regs.rxdp_hi;
589 break;
590
591 case RX_CFG:
592 reg = regs.rxcfg;
593 break;
594
595 case PQCR:
596 reg = regs.pqcr;
597 break;
598
599 case WCSR:
600 reg = regs.wcsr;
601 break;
602
603 case PCR:
604 reg = regs.pcr;
605 break;
606
607     // See the spec sheet for how RFCR and RFDR work.
608     // Basically, you write RFCR to tell the device which filter
609     // location you want to access next, and then read or write
610     // RFDR; the device knows how to respond because of what was
611     // written to RFCR.
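    //
    // Illustrative driver-side sequence (a hypothetical sketch, not taken
    // from the spec or any particular driver; read32/write32 and nic_base
    // are assumed MMIO helpers):
    //
    //     write32(nic_base + RFCR, RFCR_RFEN | 0x000); // perfect-match 1:0
    //     mac01 = read32(nic_base + RFDR) & 0xffff;    // perfectMatch[1]:[0]
    //     write32(nic_base + RFCR, RFCR_RFEN | 0x002); // perfect-match 3:2
    //     mac23 = read32(nic_base + RFDR) & 0xffff;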
612 case RFCR:
613 reg = regs.rfcr;
614 break;
615
616 case RFDR:
617 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
618 switch (rfaddr) {
619 // Read from perfect match ROM octets
620 case 0x000:
621 reg = rom.perfectMatch[1];
622 reg = reg << 8;
623 reg += rom.perfectMatch[0];
624 break;
625 case 0x002:
626 reg = rom.perfectMatch[3] << 8;
627 reg += rom.perfectMatch[2];
628 break;
629 case 0x004:
630 reg = rom.perfectMatch[5] << 8;
631 reg += rom.perfectMatch[4];
632 break;
633 default:
634 // Read filter hash table
635 if (rfaddr >= FHASH_ADDR &&
636 rfaddr < FHASH_ADDR + FHASH_SIZE) {
637
638 // Only word-aligned reads supported
639 if (rfaddr % 2)
640 panic("unaligned read from filter hash table!");
641
642 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
643 reg += rom.filterHash[rfaddr - FHASH_ADDR];
644 break;
645 }
646
647 panic("reading RFDR for something other than pattern"
648 " matching or hashing! %#x\n", rfaddr);
649 }
650 break;
651
652 case SRR:
653 reg = regs.srr;
654 break;
655
656 case MIBC:
657 reg = regs.mibc;
658 reg &= ~(MIBC_MIBS | MIBC_ACLR);
659 break;
660
661 case VRCR:
662 reg = regs.vrcr;
663 break;
664
665 case VTCR:
666 reg = regs.vtcr;
667 break;
668
669 case VDR:
670 reg = regs.vdr;
671 break;
672
673 case CCSR:
674 reg = regs.ccsr;
675 break;
676
677 case TBICR:
678 reg = regs.tbicr;
679 break;
680
681 case TBISR:
682 reg = regs.tbisr;
683 break;
684
685 case TANAR:
686 reg = regs.tanar;
687 break;
688
689 case TANLPAR:
690 reg = regs.tanlpar;
691 break;
692
693 case TANER:
694 reg = regs.taner;
695 break;
696
697 case TESR:
698 reg = regs.tesr;
699 break;
700
701 case M5REG:
702 reg = 0;
703 if (params()->rx_thread)
704 reg |= M5REG_RX_THREAD;
705 if (params()->tx_thread)
706 reg |= M5REG_TX_THREAD;
707 if (params()->rss)
708 reg |= M5REG_RSS;
709 break;
710
711 default:
712 panic("reading unimplemented register: addr=%#x", daddr);
713 }
714
715 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
716 daddr, reg, reg);
717
718 pkt->result = Success;
719 return pioDelay;
720 }
721
722 Tick
723 NSGigE::write(Packet *pkt)
724 {
725 assert(ioEnable);
726
727 Addr daddr = pkt->addr & 0xfff;
728 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
729 daddr, pkt->addr, pkt->size);
730
731 pkt->time += pioDelay;
732
733 if (daddr > LAST && daddr <= RESERVED) {
734 panic("Accessing reserved register");
735 } else if (daddr > RESERVED && daddr <= 0x3FC) {
736 if (pkt->size == sizeof(uint8_t))
737 writeConfig(daddr & 0xff, pkt->get<uint8_t>());
738 if (pkt->size == sizeof(uint16_t))
739 writeConfig(daddr & 0xff, pkt->get<uint16_t>());
740 if (pkt->size == sizeof(uint32_t))
741 writeConfig(daddr & 0xff, pkt->get<uint32_t>());
742 pkt->result = Success;
743 return pioDelay;
744 } else if (daddr > 0x3FC)
745 panic("Something is messed up!\n");
746
747 if (pkt->size == sizeof(uint32_t)) {
748 uint32_t reg = pkt->get<uint32_t>();
749 uint16_t rfaddr;
750
751 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
752
753 switch (daddr) {
754 case CR:
755 regs.command = reg;
756 if (reg & CR_TXD) {
757 txEnable = false;
758 } else if (reg & CR_TXE) {
759 txEnable = true;
760
761 // the kernel is enabling the transmit machine
762 if (txState == txIdle)
763 txKick();
764 }
765
766 if (reg & CR_RXD) {
767 rxEnable = false;
768 } else if (reg & CR_RXE) {
769 rxEnable = true;
770
771 if (rxState == rxIdle)
772 rxKick();
773 }
774
775 if (reg & CR_TXR)
776 txReset();
777
778 if (reg & CR_RXR)
779 rxReset();
780
781 if (reg & CR_SWI)
782 devIntrPost(ISR_SWI);
783
784 if (reg & CR_RST) {
785 txReset();
786 rxReset();
787
788 regsReset();
789 }
790 break;
791
792 case CFGR:
793 if (reg & CFGR_LNKSTS ||
794 reg & CFGR_SPDSTS ||
795 reg & CFGR_DUPSTS ||
796 reg & CFGR_RESERVED ||
797 reg & CFGR_T64ADDR ||
798 reg & CFGR_PCI64_DET)
799
800 // First clear all writable bits
801 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
802 CFGR_RESERVED | CFGR_T64ADDR |
803 CFGR_PCI64_DET;
804 // Now set the appropriate writable bits
805 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
806 CFGR_RESERVED | CFGR_T64ADDR |
807 CFGR_PCI64_DET);
808
809             // All of these empty checks exist because we don't think the kernel
810             // needs them implemented.  If a problem turns out to relate to one of
811             // these bits, the corresponding functionality may need to be added.
812 if (reg & CFGR_TBI_EN) ;
813 if (reg & CFGR_MODE_1000) ;
814
815 if (reg & CFGR_AUTO_1000)
816 panic("CFGR_AUTO_1000 not implemented!\n");
817
818 if (reg & CFGR_PINT_DUPSTS ||
819 reg & CFGR_PINT_LNKSTS ||
820 reg & CFGR_PINT_SPDSTS)
821 ;
822
823 if (reg & CFGR_TMRTEST) ;
824 if (reg & CFGR_MRM_DIS) ;
825 if (reg & CFGR_MWI_DIS) ;
826
827 if (reg & CFGR_T64ADDR) ;
828 // panic("CFGR_T64ADDR is read only register!\n");
829
830 if (reg & CFGR_PCI64_DET)
831                 panic("CFGR_PCI64_DET is a read-only register!\n");
832
833 if (reg & CFGR_DATA64_EN) ;
834 if (reg & CFGR_M64ADDR) ;
835 if (reg & CFGR_PHY_RST) ;
836 if (reg & CFGR_PHY_DIS) ;
837
838 if (reg & CFGR_EXTSTS_EN)
839 extstsEnable = true;
840 else
841 extstsEnable = false;
842
843 if (reg & CFGR_REQALG) ;
844 if (reg & CFGR_SB) ;
845 if (reg & CFGR_POW) ;
846 if (reg & CFGR_EXD) ;
847 if (reg & CFGR_PESEL) ;
848 if (reg & CFGR_BROM_DIS) ;
849 if (reg & CFGR_EXT_125) ;
850 if (reg & CFGR_BEM) ;
851 break;
852
853 case MEAR:
854 // Clear writable bits
855 regs.mear &= MEAR_EEDO;
856 // Set appropriate writable bits
857 regs.mear |= reg & ~MEAR_EEDO;
858
859 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
860 // even though it could get it through RFDR
861 if (reg & MEAR_EESEL) {
862 // Rising edge of clock
863 if (reg & MEAR_EECLK && !eepromClk)
864 eepromKick();
865 }
866 else {
867 eepromState = eepromStart;
868 regs.mear &= ~MEAR_EEDI;
869 }
870
871 eepromClk = reg & MEAR_EECLK;
872
873 // since phy is completely faked, MEAR_MD* don't matter
874 if (reg & MEAR_MDIO) ;
875 if (reg & MEAR_MDDIR) ;
876 if (reg & MEAR_MDC) ;
877 break;
878
879 case PTSCR:
880 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
881             // These bits control BISTs for various parts of the chip; we
882             // don't model them, so we just fake that each BIST is done.
883 if (reg & PTSCR_RBIST_EN)
884 regs.ptscr |= PTSCR_RBIST_DONE;
885 if (reg & PTSCR_EEBIST_EN)
886 regs.ptscr &= ~PTSCR_EEBIST_EN;
887 if (reg & PTSCR_EELOAD_EN)
888 regs.ptscr &= ~PTSCR_EELOAD_EN;
889 break;
890
891 case ISR: /* writing to the ISR has no effect */
892             panic("ISR is a read-only register!\n");
893
894 case IMR:
895 regs.imr = reg;
896 devIntrChangeMask();
897 break;
898
899 case IER:
900 regs.ier = reg;
901 break;
902
903 case IHR:
904 regs.ihr = reg;
905 /* not going to implement real interrupt holdoff */
906 break;
907
908 case TXDP:
909 regs.txdp = (reg & 0xFFFFFFFC);
910 assert(txState == txIdle);
911 CTDD = false;
912 break;
913
914 case TXDP_HI:
915 regs.txdp_hi = reg;
916 break;
917
918 case TX_CFG:
919 regs.txcfg = reg;
920 #if 0
921 if (reg & TX_CFG_CSI) ;
922 if (reg & TX_CFG_HBI) ;
923 if (reg & TX_CFG_MLB) ;
924 if (reg & TX_CFG_ATP) ;
925 if (reg & TX_CFG_ECRETRY) {
926 /*
927 * this could easily be implemented, but considering
928 * the network is just a fake pipe, wouldn't make
929 * sense to do this
930 */
931 }
932
933 if (reg & TX_CFG_BRST_DIS) ;
934 #endif
935
936 #if 0
937 /* we handle our own DMA, ignore the kernel's exhortations */
938 if (reg & TX_CFG_MXDMA) ;
939 #endif
940
941 // also, we currently don't care about fill/drain
942 // thresholds though this may change in the future with
943 // more realistic networks or a driver which changes it
944 // according to feedback
945
946 break;
947
948 case GPIOR:
949 // Only write writable bits
950 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
951 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
952 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
953 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
954 /* these just control general purpose i/o pins, don't matter */
955 break;
956
957 case RXDP:
958 regs.rxdp = reg;
959 CRDD = false;
960 break;
961
962 case RXDP_HI:
963 regs.rxdp_hi = reg;
964 break;
965
966 case RX_CFG:
967 regs.rxcfg = reg;
968 #if 0
969 if (reg & RX_CFG_AEP) ;
970 if (reg & RX_CFG_ARP) ;
971 if (reg & RX_CFG_STRIPCRC) ;
972 if (reg & RX_CFG_RX_RD) ;
973 if (reg & RX_CFG_ALP) ;
974 if (reg & RX_CFG_AIRL) ;
975
976 /* we handle our own DMA, ignore what kernel says about it */
977 if (reg & RX_CFG_MXDMA) ;
978
979 //also, we currently don't care about fill/drain thresholds
980 //though this may change in the future with more realistic
981 //networks or a driver which changes it according to feedback
982 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
983 #endif
984 break;
985
986 case PQCR:
987 /* there is no priority queueing used in the linux 2.6 driver */
988 regs.pqcr = reg;
989 break;
990
991 case WCSR:
992 /* not going to implement wake on LAN */
993 regs.wcsr = reg;
994 break;
995
996 case PCR:
997 /* not going to implement pause control */
998 regs.pcr = reg;
999 break;
1000
1001 case RFCR:
1002 regs.rfcr = reg;
1003
1004 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1005 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1006 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1007 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1008 acceptPerfect = (reg & RFCR_APM) ? true : false;
1009 acceptArp = (reg & RFCR_AARP) ? true : false;
1010 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1011
1012 #if 0
1013 if (reg & RFCR_APAT)
1014 panic("RFCR_APAT not implemented!\n");
1015 #endif
1016 if (reg & RFCR_UHEN)
1017 panic("Unicast hash filtering not used by drivers!\n");
1018
1019 if (reg & RFCR_ULM)
1020 panic("RFCR_ULM not implemented!\n");
1021
1022 break;
1023
1024 case RFDR:
1025 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1026 switch (rfaddr) {
1027 case 0x000:
1028 rom.perfectMatch[0] = (uint8_t)reg;
1029 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1030 break;
1031 case 0x002:
1032 rom.perfectMatch[2] = (uint8_t)reg;
1033 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1034 break;
1035 case 0x004:
1036 rom.perfectMatch[4] = (uint8_t)reg;
1037 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1038 break;
1039 default:
1040
1041 if (rfaddr >= FHASH_ADDR &&
1042 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1043
1044 // Only word-aligned writes supported
1045 if (rfaddr % 2)
1046 panic("unaligned write to filter hash table!");
1047
1048 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1049 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1050 = (uint8_t)(reg >> 8);
1051 break;
1052 }
1053             panic("writing RFDR for something other than pattern matching"
1054                   " or hashing! %#x\n", rfaddr);
1055 }
1056             break;

1057 case BRAR:
1058 regs.brar = reg;
1059 break;
1060
1061 case BRDR:
1062 panic("the driver never uses BRDR, something is wrong!\n");
1063
1064 case SRR:
1065             panic("SRR is a read-only register!\n");
1066
1067 case MIBC:
1068 panic("the driver never uses MIBC, something is wrong!\n");
1069
1070 case VRCR:
1071 regs.vrcr = reg;
1072 break;
1073
1074 case VTCR:
1075 regs.vtcr = reg;
1076 break;
1077
1078 case VDR:
1079 panic("the driver never uses VDR, something is wrong!\n");
1080
1081 case CCSR:
1082 /* not going to implement clockrun stuff */
1083 regs.ccsr = reg;
1084 break;
1085
1086 case TBICR:
1087 regs.tbicr = reg;
1088 if (reg & TBICR_MR_LOOPBACK)
1089 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1090
1091 if (reg & TBICR_MR_AN_ENABLE) {
1092 regs.tanlpar = regs.tanar;
1093 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1094 }
1095
1096 #if 0
1097 if (reg & TBICR_MR_RESTART_AN) ;
1098 #endif
1099
1100 break;
1101
1102 case TBISR:
1103             panic("TBISR is a read-only register!\n");
1104
1105 case TANAR:
1106 // Only write the writable bits
1107 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1108 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1109
1110 // Pause capability unimplemented
1111 #if 0
1112 if (reg & TANAR_PS2) ;
1113 if (reg & TANAR_PS1) ;
1114 #endif
1115
1116 break;
1117
1118 case TANLPAR:
1119 panic("this should only be written to by the fake phy!\n");
1120
1121 case TANER:
1122             panic("TANER is a read-only register!\n");
1123
1124 case TESR:
1125 regs.tesr = reg;
1126 break;
1127
1128 default:
1129 panic("invalid register access daddr=%#x", daddr);
1130 }
1131 } else {
1132 panic("Invalid Request Size");
1133 }
1134 pkt->result = Success;
1135 return pioDelay;
1136 }
1137
1138 void
1139 NSGigE::devIntrPost(uint32_t interrupts)
1140 {
1141 if (interrupts & ISR_RESERVE)
1142 panic("Cannot set a reserved interrupt");
1143
1144 if (interrupts & ISR_NOIMPL)
1145 warn("interrupt not implemented %#x\n", interrupts);
1146
1147 interrupts &= ISR_IMPL;
1148 regs.isr |= interrupts;
1149
1150 if (interrupts & regs.imr) {
1151 if (interrupts & ISR_SWI) {
1152 totalSwi++;
1153 }
1154 if (interrupts & ISR_RXIDLE) {
1155 totalRxIdle++;
1156 }
1157 if (interrupts & ISR_RXOK) {
1158 totalRxOk++;
1159 }
1160 if (interrupts & ISR_RXDESC) {
1161 totalRxDesc++;
1162 }
1163 if (interrupts & ISR_TXOK) {
1164 totalTxOk++;
1165 }
1166 if (interrupts & ISR_TXIDLE) {
1167 totalTxIdle++;
1168 }
1169 if (interrupts & ISR_TXDESC) {
1170 totalTxDesc++;
1171 }
1172 if (interrupts & ISR_RXORN) {
1173 totalRxOrn++;
1174 }
1175 }
1176
1177 DPRINTF(EthernetIntr,
1178 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1179 interrupts, regs.isr, regs.imr);
1180
1181 if ((regs.isr & regs.imr)) {
1182 Tick when = curTick;
1183 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
1184 when += intrDelay;
1185 cpuIntrPost(when);
1186 }
1187 }
1188
1189 /* Putting the interrupt-counting stats inside this function means that it
1190    is now limited to clearing all interrupts when the kernel reads the ISR
1191    and services them.  Keep that in mind if you are thinking of expanding
1192    its use.
1193 */
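/*
 * Summary of the protocol modelled by devIntrPost()/devIntrClear(): posting
 * sets bits in regs.isr and, whenever (regs.isr & regs.imr) is non-zero,
 * schedules a CPU interrupt (delayed by intrDelay unless an ISR_NODELAY bit
 * is involved); clearing drops the requested bits and deasserts the CPU
 * interrupt once no enabled bits remain.  The total and posted counters
 * track how many ISR writes are coalesced into each actual CPU post.
 */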
1194 void
1195 NSGigE::devIntrClear(uint32_t interrupts)
1196 {
1197 if (interrupts & ISR_RESERVE)
1198 panic("Cannot clear a reserved interrupt");
1199
1200 if (regs.isr & regs.imr & ISR_SWI) {
1201 postedSwi++;
1202 }
1203 if (regs.isr & regs.imr & ISR_RXIDLE) {
1204 postedRxIdle++;
1205 }
1206 if (regs.isr & regs.imr & ISR_RXOK) {
1207 postedRxOk++;
1208 }
1209 if (regs.isr & regs.imr & ISR_RXDESC) {
1210 postedRxDesc++;
1211 }
1212 if (regs.isr & regs.imr & ISR_TXOK) {
1213 postedTxOk++;
1214 }
1215 if (regs.isr & regs.imr & ISR_TXIDLE) {
1216 postedTxIdle++;
1217 }
1218 if (regs.isr & regs.imr & ISR_TXDESC) {
1219 postedTxDesc++;
1220 }
1221 if (regs.isr & regs.imr & ISR_RXORN) {
1222 postedRxOrn++;
1223 }
1224
1225 if (regs.isr & regs.imr & ISR_IMPL)
1226 postedInterrupts++;
1227
1228 interrupts &= ~ISR_NOIMPL;
1229 regs.isr &= ~interrupts;
1230
1231 DPRINTF(EthernetIntr,
1232 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1233 interrupts, regs.isr, regs.imr);
1234
1235 if (!(regs.isr & regs.imr))
1236 cpuIntrClear();
1237 }
1238
1239 void
1240 NSGigE::devIntrChangeMask()
1241 {
1242 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1243 regs.isr, regs.imr, regs.isr & regs.imr);
1244
1245 if (regs.isr & regs.imr)
1246 cpuIntrPost(curTick);
1247 else
1248 cpuIntrClear();
1249 }
1250
1251 void
1252 NSGigE::cpuIntrPost(Tick when)
1253 {
1254 // If the interrupt you want to post is later than an interrupt
1255 // already scheduled, just let it post in the coming one and don't
1256 // schedule another.
1257     // HOWEVER, we must be sure that the scheduled intrTick is in the
1258     // future (this was formerly the source of a bug).
1259 /**
1260 * @todo this warning should be removed and the intrTick code should
1261 * be fixed.
1262 */
1263 assert(when >= curTick);
1264 assert(intrTick >= curTick || intrTick == 0);
1265 if (when > intrTick && intrTick != 0) {
1266 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
1267 intrTick);
1268 return;
1269 }
1270
1271 intrTick = when;
1272 if (intrTick < curTick) {
1273 debug_break();
1274 intrTick = curTick;
1275 }
1276
1277 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
1278 intrTick);
1279
1280 if (intrEvent)
1281 intrEvent->squash();
1282 intrEvent = new IntrEvent(this, true);
1283 intrEvent->schedule(intrTick);
1284 }
1285
1286 void
1287 NSGigE::cpuInterrupt()
1288 {
1289 assert(intrTick == curTick);
1290
1291 // Whether or not there's a pending interrupt, we don't care about
1292 // it anymore
1293 intrEvent = 0;
1294 intrTick = 0;
1295
1296 // Don't send an interrupt if there's already one
1297 if (cpuPendingIntr) {
1298 DPRINTF(EthernetIntr,
1299 "would send an interrupt now, but there's already pending\n");
1300 } else {
1301 // Send interrupt
1302 cpuPendingIntr = true;
1303
1304 DPRINTF(EthernetIntr, "posting interrupt\n");
1305 intrPost();
1306 }
1307 }
1308
1309 void
1310 NSGigE::cpuIntrClear()
1311 {
1312 if (!cpuPendingIntr)
1313 return;
1314
1315 if (intrEvent) {
1316 intrEvent->squash();
1317 intrEvent = 0;
1318 }
1319
1320 intrTick = 0;
1321
1322 cpuPendingIntr = false;
1323
1324 DPRINTF(EthernetIntr, "clearing interrupt\n");
1325 intrClear();
1326 }
1327
1328 bool
1329 NSGigE::cpuIntrPending() const
1330 { return cpuPendingIntr; }
1331
1332 void
1333 NSGigE::txReset()
1334 {
1335
1336 DPRINTF(Ethernet, "transmit reset\n");
1337
1338 CTDD = false;
1339     txEnable = false;
1340 txFragPtr = 0;
1341 assert(txDescCnt == 0);
1342 txFifo.clear();
1343 txState = txIdle;
1344 assert(txDmaState == dmaIdle);
1345 }
1346
1347 void
1348 NSGigE::rxReset()
1349 {
1350 DPRINTF(Ethernet, "receive reset\n");
1351
1352 CRDD = false;
1353 assert(rxPktBytes == 0);
1354 rxEnable = false;
1355 rxFragPtr = 0;
1356 assert(rxDescCnt == 0);
1357 assert(rxDmaState == dmaIdle);
1358 rxFifo.clear();
1359 rxState = rxIdle;
1360 }
1361
1362 void
1363 NSGigE::regsReset()
1364 {
1365 memset(&regs, 0, sizeof(regs));
1366 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1367 regs.mear = 0x12;
1368 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1369 // fill threshold to 32 bytes
1370 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1371 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1372 regs.mibc = MIBC_FRZ;
1373 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1374 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1375 regs.brar = 0xffffffff;
1376
1377 extstsEnable = false;
1378 acceptBroadcast = false;
1379 acceptMulticast = false;
1380 acceptUnicast = false;
1381 acceptPerfect = false;
1382 acceptArp = false;
1383 }
1384
1385 bool
1386 NSGigE::doRxDmaRead()
1387 {
1388 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1389 rxDmaState = dmaReading;
1390
1391 if (dmaPending())
1392 rxDmaState = dmaReadWaiting;
1393 else
1394 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1395
1396 return true;
1397 }
1398
1399 void
1400 NSGigE::rxDmaReadDone()
1401 {
1402 assert(rxDmaState == dmaReading);
1403 rxDmaState = dmaIdle;
1404
1405 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1406 rxDmaAddr, rxDmaLen);
1407 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1408
1409 // If the transmit state machine has a pending DMA, let it go first
1410 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1411 txKick();
1412
1413 rxKick();
1414 }
1415
1416 bool
1417 NSGigE::doRxDmaWrite()
1418 {
1419 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1420 rxDmaState = dmaWriting;
1421
1422 if (dmaPending())
1423 rxDmaState = dmaWriteWaiting;
1424 else
1425 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1426 return true;
1427 }
1428
1429 void
1430 NSGigE::rxDmaWriteDone()
1431 {
1432 assert(rxDmaState == dmaWriting);
1433 rxDmaState = dmaIdle;
1434
1435 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1436 rxDmaAddr, rxDmaLen);
1437 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1438
1439 // If the transmit state machine has a pending DMA, let it go first
1440 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1441 txKick();
1442
1443 rxKick();
1444 }
1445
1446 void
1447 NSGigE::rxKick()
1448 {
1449 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1450
1451 DPRINTF(EthernetSM,
1452 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1453 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1454
1455 Addr link, bufptr;
1456 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1457 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1458
1459 next:
1460 if (clock) {
1461 if (rxKickTick > curTick) {
1462 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1463 rxKickTick);
1464
1465 goto exit;
1466 }
1467
1468 // Go to the next state machine clock tick.
1469 rxKickTick = curTick + cycles(1);
1470 }
1471
1472 switch(rxDmaState) {
1473 case dmaReadWaiting:
1474 if (doRxDmaRead())
1475 goto exit;
1476 break;
1477 case dmaWriteWaiting:
1478 if (doRxDmaWrite())
1479 goto exit;
1480 break;
1481 default:
1482 break;
1483 }
1484
1485 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1486 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1487
1488     // See the state machine in the spec for details.
1489     // The way this works: if you finish the work for one state and can
1490     // go directly to another, you jump to the label "next".  If there is
1491     // intermediate work, such as a DMA, that keeps you from reaching the
1492     // next state yet, you jump to "exit" and leave the loop.  When the
1493     // DMA completes it triggers an event that re-enters this loop and
1494     // picks up where the state machine left off.
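    //
    // For reference, the usual flow for one received packet (derived from
    // the cases below, not from the spec) is:
    //   rxIdle -> rxDescRead -> rxFifoBlock -> (rxFragWrite)* ->
    //   rxFifoBlock -> rxDescWrite -> rxAdvance -> rxDescRead -> ...
    // with rxDescRefr re-reading only the link field when CRDD is set.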
1495 switch (rxState) {
1496 case rxIdle:
1497 if (!rxEnable) {
1498 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1499 goto exit;
1500 }
1501
1502 if (CRDD) {
1503 rxState = rxDescRefr;
1504
1505 rxDmaAddr = regs.rxdp & 0x3fffffff;
1506 rxDmaData =
1507 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1508 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1509 rxDmaFree = dmaDescFree;
1510
1511 descDmaReads++;
1512 descDmaRdBytes += rxDmaLen;
1513
1514 if (doRxDmaRead())
1515 goto exit;
1516 } else {
1517 rxState = rxDescRead;
1518
1519 rxDmaAddr = regs.rxdp & 0x3fffffff;
1520 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1521 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1522 rxDmaFree = dmaDescFree;
1523
1524 descDmaReads++;
1525 descDmaRdBytes += rxDmaLen;
1526
1527 if (doRxDmaRead())
1528 goto exit;
1529 }
1530 break;
1531
1532 case rxDescRefr:
1533 if (rxDmaState != dmaIdle)
1534 goto exit;
1535
1536 rxState = rxAdvance;
1537 break;
1538
1539 case rxDescRead:
1540 if (rxDmaState != dmaIdle)
1541 goto exit;
1542
1543 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1544 regs.rxdp & 0x3fffffff);
1545 DPRINTF(EthernetDesc,
1546 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1547 link, bufptr, cmdsts, extsts);
1548
1549 if (cmdsts & CMDSTS_OWN) {
1550 devIntrPost(ISR_RXIDLE);
1551 rxState = rxIdle;
1552 goto exit;
1553 } else {
1554 rxState = rxFifoBlock;
1555 rxFragPtr = bufptr;
1556 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1557 }
1558 break;
1559
1560 case rxFifoBlock:
1561 if (!rxPacket) {
1562 /**
1563 * @todo in reality, we should be able to start processing
1564 * the packet as it arrives, and not have to wait for the
1565              * full packet to be in the receive fifo.
1566 */
1567 if (rxFifo.empty())
1568 goto exit;
1569
1570 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1571
1572 // If we don't have a packet, grab a new one from the fifo.
1573 rxPacket = rxFifo.front();
1574 rxPktBytes = rxPacket->length;
1575 rxPacketBufPtr = rxPacket->data;
1576
1577 #if TRACING_ON
1578 if (DTRACE(Ethernet)) {
1579 IpPtr ip(rxPacket);
1580 if (ip) {
1581 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1582 TcpPtr tcp(ip);
1583 if (tcp) {
1584 DPRINTF(Ethernet,
1585 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1586 tcp->sport(), tcp->dport(), tcp->seq(),
1587 tcp->ack());
1588 }
1589 }
1590 }
1591 #endif
1592
1593 // sanity check - i think the driver behaves like this
1594 assert(rxDescCnt >= rxPktBytes);
1595 rxFifo.pop();
1596 }
1597
1598
1599         // don't need the && rxDescCnt > 0 if the driver sanity check
1600 // above holds
1601 if (rxPktBytes > 0) {
1602 rxState = rxFragWrite;
1603 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1604 // check holds
1605 rxXferLen = rxPktBytes;
1606
1607 rxDmaAddr = rxFragPtr & 0x3fffffff;
1608 rxDmaData = rxPacketBufPtr;
1609 rxDmaLen = rxXferLen;
1610 rxDmaFree = dmaDataFree;
1611
1612 if (doRxDmaWrite())
1613 goto exit;
1614
1615 } else {
1616 rxState = rxDescWrite;
1617
1618 //if (rxPktBytes == 0) { /* packet is done */
1619 assert(rxPktBytes == 0);
1620 DPRINTF(EthernetSM, "done with receiving packet\n");
1621
1622 cmdsts |= CMDSTS_OWN;
1623 cmdsts &= ~CMDSTS_MORE;
1624 cmdsts |= CMDSTS_OK;
1625 cmdsts &= 0xffff0000;
1626 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1627
1628 #if 0
1629 /*
1630              * The driver only uses these fields for its own statistics
1631              * keeping, which we don't care about; they aren't necessary
1632              * for functionality, and computing them would just slow us
1633              * down.  If a later driver version uses them for functional
1634              * purposes, re-enable this block.
1635 */
1636 if (rxFilterEnable) {
1637 cmdsts &= ~CMDSTS_DEST_MASK;
1638 const EthAddr &dst = rxFifoFront()->dst();
1639 if (dst->unicast())
1640 cmdsts |= CMDSTS_DEST_SELF;
1641 if (dst->multicast())
1642 cmdsts |= CMDSTS_DEST_MULTI;
1643 if (dst->broadcast())
1644 cmdsts |= CMDSTS_DEST_MASK;
1645 }
1646 #endif
1647
1648 IpPtr ip(rxPacket);
1649 if (extstsEnable && ip) {
1650 extsts |= EXTSTS_IPPKT;
1651 rxIpChecksums++;
1652 if (cksum(ip) != 0) {
1653 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1654 extsts |= EXTSTS_IPERR;
1655 }
1656 TcpPtr tcp(ip);
1657 UdpPtr udp(ip);
1658 if (tcp) {
1659 extsts |= EXTSTS_TCPPKT;
1660 rxTcpChecksums++;
1661 if (cksum(tcp) != 0) {
1662 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1663 extsts |= EXTSTS_TCPERR;
1664
1665 }
1666 } else if (udp) {
1667 extsts |= EXTSTS_UDPPKT;
1668 rxUdpChecksums++;
1669 if (cksum(udp) != 0) {
1670 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1671 extsts |= EXTSTS_UDPERR;
1672 }
1673 }
1674 }
1675 rxPacket = 0;
1676
1677 /*
1678              * The driver seems to always receive into descriptor buffers
1679              * of size 1514, so a packet is never split across multiple
1680              * descriptors on the receive side; that case is therefore not
1681              * implemented, hence the assert above.
1682 */
1683
1684 DPRINTF(EthernetDesc,
1685 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1686 regs.rxdp & 0x3fffffff);
1687 DPRINTF(EthernetDesc,
1688 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1689 link, bufptr, cmdsts, extsts);
1690
1691 rxDmaAddr = regs.rxdp & 0x3fffffff;
1692 rxDmaData = &cmdsts;
1693 if (is64bit) {
1694 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1695 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1696 } else {
1697 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1698 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1699 }
1700 rxDmaFree = dmaDescFree;
1701
1702 descDmaWrites++;
1703 descDmaWrBytes += rxDmaLen;
1704
1705 if (doRxDmaWrite())
1706 goto exit;
1707 }
1708 break;
1709
1710 case rxFragWrite:
1711 if (rxDmaState != dmaIdle)
1712 goto exit;
1713
1714 rxPacketBufPtr += rxXferLen;
1715 rxFragPtr += rxXferLen;
1716 rxPktBytes -= rxXferLen;
1717
1718 rxState = rxFifoBlock;
1719 break;
1720
1721 case rxDescWrite:
1722 if (rxDmaState != dmaIdle)
1723 goto exit;
1724
1725 assert(cmdsts & CMDSTS_OWN);
1726
1727 assert(rxPacket == 0);
1728 devIntrPost(ISR_RXOK);
1729
1730 if (cmdsts & CMDSTS_INTR)
1731 devIntrPost(ISR_RXDESC);
1732
1733 if (!rxEnable) {
1734 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1735 rxState = rxIdle;
1736 goto exit;
1737 } else
1738 rxState = rxAdvance;
1739 break;
1740
1741 case rxAdvance:
1742 if (link == 0) {
1743 devIntrPost(ISR_RXIDLE);
1744 rxState = rxIdle;
1745 CRDD = true;
1746 goto exit;
1747 } else {
1748 if (rxDmaState != dmaIdle)
1749 goto exit;
1750 rxState = rxDescRead;
1751 regs.rxdp = link;
1752 CRDD = false;
1753
1754 rxDmaAddr = regs.rxdp & 0x3fffffff;
1755 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1756 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1757 rxDmaFree = dmaDescFree;
1758
1759 if (doRxDmaRead())
1760 goto exit;
1761 }
1762 break;
1763
1764 default:
1765 panic("Invalid rxState!");
1766 }
1767
1768 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1769 NsRxStateStrings[rxState]);
1770 goto next;
1771
1772 exit:
1773 /**
1774 * @todo do we want to schedule a future kick?
1775 */
1776 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1777 NsRxStateStrings[rxState]);
1778
1779 if (clock && !rxKickEvent.scheduled())
1780 rxKickEvent.schedule(rxKickTick);
1781 }
1782
1783 void
1784 NSGigE::transmit()
1785 {
1786 if (txFifo.empty()) {
1787 DPRINTF(Ethernet, "nothing to transmit\n");
1788 return;
1789 }
1790
1791 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1792 txFifo.size());
1793 if (interface->sendPacket(txFifo.front())) {
1794 #if TRACING_ON
1795 if (DTRACE(Ethernet)) {
1796 IpPtr ip(txFifo.front());
1797 if (ip) {
1798 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1799 TcpPtr tcp(ip);
1800 if (tcp) {
1801 DPRINTF(Ethernet,
1802 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1803 tcp->sport(), tcp->dport(), tcp->seq(),
1804 tcp->ack());
1805 }
1806 }
1807 }
1808 #endif
1809
1810 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1811 txBytes += txFifo.front()->length;
1812 txPackets++;
1813
1814 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1815 txFifo.avail());
1816 txFifo.pop();
1817
1818 /*
1819          * Normally the descriptor would be written back here, and ONLY
1820          * after that completes would this interrupt be sent.  But since
1821          * our transmits never actually fail, we just post the interrupt
1822          * here; otherwise the code would have to stray from this nice
1823          * format, and it is functionally the same anyway.
1824 */
1825 devIntrPost(ISR_TXOK);
1826 }
1827
1828 if (!txFifo.empty() && !txEvent.scheduled()) {
1829 DPRINTF(Ethernet, "reschedule transmit\n");
1830 txEvent.schedule(curTick + retryTime);
1831 }
1832 }
1833
1834 bool
1835 NSGigE::doTxDmaRead()
1836 {
1837 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1838 txDmaState = dmaReading;
1839
1840 if (dmaPending())
1841 txDmaState = dmaReadWaiting;
1842 else
1843 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1844
1845 return true;
1846 }
1847
1848 void
1849 NSGigE::txDmaReadDone()
1850 {
1851 assert(txDmaState == dmaReading);
1852 txDmaState = dmaIdle;
1853
1854 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1855 txDmaAddr, txDmaLen);
1856 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1857
1858 // If the receive state machine has a pending DMA, let it go first
1859 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1860 rxKick();
1861
1862 txKick();
1863 }
1864
1865 bool
1866 NSGigE::doTxDmaWrite()
1867 {
1868 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1869 txDmaState = dmaWriting;
1870
1871 if (dmaPending())
1872 txDmaState = dmaWriteWaiting;
1873 else
1874 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1875 return true;
1876 }
1877
1878 void
1879 NSGigE::txDmaWriteDone()
1880 {
1881 assert(txDmaState == dmaWriting);
1882 txDmaState = dmaIdle;
1883
1884 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1885 txDmaAddr, txDmaLen);
1886 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1887
1888 // If the receive state machine has a pending DMA, let it go first
1889 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1890 rxKick();
1891
1892 txKick();
1893 }
1894
1895 void
1896 NSGigE::txKick()
1897 {
1898 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1899
1900 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1901 NsTxStateStrings[txState], is64bit ? 64 : 32);
1902
1903 Addr link, bufptr;
1904 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1905 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1906
1907 next:
1908 if (clock) {
1909 if (txKickTick > curTick) {
1910 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1911 txKickTick);
1912 goto exit;
1913 }
1914
1915 // Go to the next state machine clock tick.
1916 txKickTick = curTick + cycles(1);
1917 }
1918
1919 switch(txDmaState) {
1920 case dmaReadWaiting:
1921 if (doTxDmaRead())
1922 goto exit;
1923 break;
1924 case dmaWriteWaiting:
1925 if (doTxDmaWrite())
1926 goto exit;
1927 break;
1928 default:
1929 break;
1930 }
1931
1932 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1933 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
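
    // As in rxKick() above, "next" advances the state machine within this
    // call and "exit" leaves it until a DMA-completion event re-enters it.
    // The usual flow for one transmitted packet (derived from the cases
    // below) is:
    //   txIdle -> txDescRead -> txFifoBlock -> (txFragRead)* ->
    //   txFifoBlock -> descriptor writeback -> txAdvance -> txDescRead -> ...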
1934 switch (txState) {
1935 case txIdle:
1936 if (!txEnable) {
1937 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1938 goto exit;
1939 }
1940
1941 if (CTDD) {
1942 txState = txDescRefr;
1943
1944 txDmaAddr = regs.txdp & 0x3fffffff;
1945 txDmaData =
1946 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1947 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1948 txDmaFree = dmaDescFree;
1949
1950 descDmaReads++;
1951 descDmaRdBytes += txDmaLen;
1952
1953 if (doTxDmaRead())
1954 goto exit;
1955
1956 } else {
1957 txState = txDescRead;
1958
1959 txDmaAddr = regs.txdp & 0x3fffffff;
1960 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1961 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1962 txDmaFree = dmaDescFree;
1963
1964 descDmaReads++;
1965 descDmaRdBytes += txDmaLen;
1966
1967 if (doTxDmaRead())
1968 goto exit;
1969 }
1970 break;
1971
1972 case txDescRefr:
1973 if (txDmaState != dmaIdle)
1974 goto exit;
1975
1976 txState = txAdvance;
1977 break;
1978
1979 case txDescRead:
1980 if (txDmaState != dmaIdle)
1981 goto exit;
1982
1983 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1984 regs.txdp & 0x3fffffff);
1985 DPRINTF(EthernetDesc,
1986 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1987 link, bufptr, cmdsts, extsts);
1988
1989 if (cmdsts & CMDSTS_OWN) {
1990 txState = txFifoBlock;
1991 txFragPtr = bufptr;
1992 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1993 } else {
1994 devIntrPost(ISR_TXIDLE);
1995 txState = txIdle;
1996 goto exit;
1997 }
1998 break;
1999
2000 case txFifoBlock:
2001 if (!txPacket) {
2002 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2003 txPacket = new EthPacketData(16384);
2004 txPacketBufPtr = txPacket->data;
2005 }
2006
2007 if (txDescCnt == 0) {
2008 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2009 if (cmdsts & CMDSTS_MORE) {
2010 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2011 txState = txDescWrite;
2012
2013 cmdsts &= ~CMDSTS_OWN;
2014
2015 txDmaAddr = regs.txdp & 0x3fffffff;
2016 txDmaData = &cmdsts;
2017 if (is64bit) {
2018 txDmaAddr += offsetof(ns_desc64, cmdsts);
2019 txDmaLen = sizeof(txDesc64.cmdsts);
2020 } else {
2021 txDmaAddr += offsetof(ns_desc32, cmdsts);
2022 txDmaLen = sizeof(txDesc32.cmdsts);
2023 }
2024 txDmaFree = dmaDescFree;
2025
2026 if (doTxDmaWrite())
2027 goto exit;
2028
2029 } else { /* this packet is totally done */
2030 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2031                 /* deal with the packet that just finished */
2032 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2033 IpPtr ip(txPacket);
2034 if (extsts & EXTSTS_UDPPKT) {
2035 UdpPtr udp(ip);
2036 udp->sum(0);
2037 udp->sum(cksum(udp));
2038 txUdpChecksums++;
2039 } else if (extsts & EXTSTS_TCPPKT) {
2040 TcpPtr tcp(ip);
2041 tcp->sum(0);
2042 tcp->sum(cksum(tcp));
2043 txTcpChecksums++;
2044 }
2045 if (extsts & EXTSTS_IPPKT) {
2046 ip->sum(0);
2047 ip->sum(cksum(ip));
2048 txIpChecksums++;
2049 }
2050 }
2051
2052 txPacket->length = txPacketBufPtr - txPacket->data;
2053                     // This check exists only because the receive side
2054                     // can't handle a packet bigger than 1514 bytes.
2055                     if (txPacket->length > 1514)
2056                         panic("transmit packet too large, %d > 1514\n",
2057                               txPacket->length);
2058
2059 #ifndef NDEBUG
2060 bool success =
2061 #endif
2062 txFifo.push(txPacket);
2063 assert(success);
2064
2065 /*
2066                      * The following section is not to spec, but it
2067                      * shouldn't make any functional difference.  Normally
2068                      * the chip waits until the transmit has occurred
2069                      * before writing back the descriptor, because it has
2070                      * to see that the packet was successfully transmitted
2071                      * in order to decide whether to set CMDSTS_OK.
2072                      * In the simulator the transmit always succeeds, and
2073                      * writing this exactly to spec would complicate the
2074                      * code, so we just do it here.
2075 */
2076
2077 cmdsts &= ~CMDSTS_OWN;
2078 cmdsts |= CMDSTS_OK;
2079
2080 DPRINTF(EthernetDesc,
2081 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2082 cmdsts, extsts);
2083
2084 txDmaFree = dmaDescFree;
2085 txDmaAddr = regs.txdp & 0x3fffffff;
2086 txDmaData = &cmdsts;
2087 if (is64bit) {
2088 txDmaAddr += offsetof(ns_desc64, cmdsts);
2089 txDmaLen =
2090 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2091 } else {
2092 txDmaAddr += offsetof(ns_desc32, cmdsts);
2093 txDmaLen =
2094 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2095 }
2096
2097 descDmaWrites++;
2098 descDmaWrBytes += txDmaLen;
2099
2100 transmit();
2101 txPacket = 0;
2102
2103 if (!txEnable) {
2104 DPRINTF(EthernetSM, "halting TX state machine\n");
2105 txState = txIdle;
2106 goto exit;
2107 } else
2108 txState = txAdvance;
2109
2110 if (doTxDmaWrite())
2111 goto exit;
2112 }
2113 } else {
2114 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2115 if (!txFifo.full()) {
2116 txState = txFragRead;
2117
2118 /*
2119 * The number of bytes transferred is either whatever
2120 * is left in the descriptor (txDescCnt), or if there
2121 * is not enough room in the fifo, just whatever room
2122 * is left in the fifo
2123 */
2124 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2125
2126 txDmaAddr = txFragPtr & 0x3fffffff;
2127 txDmaData = txPacketBufPtr;
2128 txDmaLen = txXferLen;
2129 txDmaFree = dmaDataFree;
2130
2131 if (doTxDmaRead())
2132 goto exit;
2133 } else {
2134 txState = txFifoBlock;
2135 transmit();
2136
2137 goto exit;
2138 }
2139
2140 }
2141 break;
2142
2143 case txFragRead:
2144 if (txDmaState != dmaIdle)
2145 goto exit;
2146
2147 txPacketBufPtr += txXferLen;
2148 txFragPtr += txXferLen;
2149 txDescCnt -= txXferLen;
2150 txFifo.reserve(txXferLen);
2151
2152 txState = txFifoBlock;
2153 break;
2154
2155 case txDescWrite:
2156 if (txDmaState != dmaIdle)
2157 goto exit;
2158
2159 if (cmdsts & CMDSTS_INTR)
2160 devIntrPost(ISR_TXDESC);
2161
2162 if (!txEnable) {
2163 DPRINTF(EthernetSM, "halting TX state machine\n");
2164 txState = txIdle;
2165 goto exit;
2166 } else
2167 txState = txAdvance;
2168 break;
2169
2170 case txAdvance:
2171 if (link == 0) {
2172 devIntrPost(ISR_TXIDLE);
2173 txState = txIdle;
2174 goto exit;
2175 } else {
2176 if (txDmaState != dmaIdle)
2177 goto exit;
2178 txState = txDescRead;
2179 regs.txdp = link;
2180 CTDD = false;
2181
2182 txDmaAddr = link & 0x3fffffff;
2183 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2184 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2185 txDmaFree = dmaDescFree;
2186
2187 if (doTxDmaRead())
2188 goto exit;
2189 }
2190 break;
2191
2192 default:
2193 panic("invalid state");
2194 }
2195
2196 DPRINTF(EthernetSM, "entering next txState=%s\n",
2197 NsTxStateStrings[txState]);
2198 goto next;
2199
2200 exit:
2201 /**
2202 * @todo do we want to schedule a future kick?
2203 */
2204 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2205 NsTxStateStrings[txState]);
2206
2207 if (clock && !txKickEvent.scheduled())
2208 txKickEvent.schedule(txKickTick);
2209 }
2210
2211 /**
2212 * Advance the EEPROM state machine
2213 * Called on rising edge of EEPROM clock bit in MEAR
2214 */
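/*
 * The state machine implements just enough of a serial EEPROM protocol for
 * the FreeBSD driver: wait for a start bit on EEDI, shift in a 2-bit opcode
 * (only EEPROM_READ is supported), shift in a 6-bit address, then clock the
 * selected 16-bit word out on EEDO, most significant bit first.
 */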
2215 void
2216 NSGigE::eepromKick()
2217 {
2218 switch (eepromState) {
2219
2220 case eepromStart:
2221
2222 // Wait for start bit
2223 if (regs.mear & MEAR_EEDI) {
2224 // Set up to get 2 opcode bits
2225 eepromState = eepromGetOpcode;
2226 eepromBitsToRx = 2;
2227 eepromOpcode = 0;
2228 }
2229 break;
2230
2231 case eepromGetOpcode:
2232 eepromOpcode <<= 1;
2233 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
2234 --eepromBitsToRx;
2235
2236 // Done getting opcode
2237 if (eepromBitsToRx == 0) {
2238 if (eepromOpcode != EEPROM_READ)
2239 panic("only EEPROM reads are implemented!");
2240
2241 // Set up to get address
2242 eepromState = eepromGetAddress;
2243 eepromBitsToRx = 6;
2244 eepromAddress = 0;
2245 }
2246 break;
2247
2248 case eepromGetAddress:
2249 eepromAddress <<= 1;
2250 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
2251 --eepromBitsToRx;
2252
2253 // Done getting address
2254 if (eepromBitsToRx == 0) {
2255
2256 if (eepromAddress >= EEPROM_SIZE)
2257 panic("EEPROM read access out of range!");
2258
2259 switch (eepromAddress) {
2260
2261 case EEPROM_PMATCH2_ADDR:
2262 eepromData = rom.perfectMatch[5];
2263 eepromData <<= 8;
2264 eepromData += rom.perfectMatch[4];
2265 break;
2266
2267 case EEPROM_PMATCH1_ADDR:
2268 eepromData = rom.perfectMatch[3];
2269 eepromData <<= 8;
2270 eepromData += rom.perfectMatch[2];
2271 break;
2272
2273 case EEPROM_PMATCH0_ADDR:
2274 eepromData = rom.perfectMatch[1];
2275 eepromData <<= 8;
2276 eepromData += rom.perfectMatch[0];
2277 break;
2278
2279 default:
2280 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2281 }
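// Worked example (illustrative MAC, not the configured one): with
// rom.perfectMatch = {0x00, 0x90, 0x00, 0x00, 0x00, 0x01}, the PMATCH2 word
// assembled above is (0x01 << 8) | 0x00 = 0x0100; the driver stitches the
// three words back into the station address.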
2282 // Set up to read data
2283 eepromState = eepromRead;
2284 eepromBitsToRx = 16;
2285
2286 // Clear data in bit
2287 regs.mear &= ~MEAR_EEDI;
2288 }
2289 break;
2290
2291 case eepromRead:
2292 // Clear Data Out bit
2293 regs.mear &= ~MEAR_EEDO;
2294 // Set bit to value of current EEPROM bit
2295 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
2296
2297 eepromData <<= 1;
2298 --eepromBitsToRx;
2299
2300 // All done
2301 if (eepromBitsToRx == 0) {
2302 eepromState = eepromStart;
2303 }
2304 break;
2305
2306 default:
2307 panic("invalid EEPROM state");
2308 }
2309
2310 }
2311
2312 void
2313 NSGigE::transferDone()
2314 {
2315 if (txFifo.empty()) {
2316 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2317 return;
2318 }
2319
2320 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2321
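// Kick off the actual wire transmit one device cycle from now; using
// reschedule() when the event is already pending coalesces back-to-back
// completions into a single txEvent.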
2322 if (txEvent.scheduled())
2323 txEvent.reschedule(curTick + cycles(1));
2324 else
2325 txEvent.schedule(curTick + cycles(1));
2326 }
2327
2328 bool
2329 NSGigE::rxFilter(const EthPacketPtr &packet)
2330 {
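// Default-drop policy: each accept* flag (programmed via the receive filter
// configuration elsewhere in this model) independently clears 'drop'. For
// example, with only acceptBroadcast set, a unicast frame addressed to this
// station is still dropped unless acceptPerfect or acceptUnicast is also set.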
2331 EthPtr eth = packet;
2332 bool drop = true;
2334
2335 const EthAddr &dst = eth->dst();
2336 if (dst.unicast()) {
2337 // If we're accepting all unicast addresses
2338 if (acceptUnicast)
2339 drop = false;
2340
2341 // If we make a perfect match
2342 if (acceptPerfect && dst == rom.perfectMatch)
2343 drop = false;
2344
2345 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2346 drop = false;
2347
2348 } else if (dst.broadcast()) {
2349 // if we're accepting broadcasts
2350 if (acceptBroadcast)
2351 drop = false;
2352
2353 } else if (dst.multicast()) {
2354 // if we're accepting all multicasts
2355 if (acceptMulticast)
2356 drop = false;
2357
2358 // Multicast hashing is not modelled: if hashing is enabled, all multicast frames are accepted
2359 if (multicastHashEnable)
2360 drop = false;
2361 }
2362
2363 if (drop) {
2364 DPRINTF(Ethernet, "rxFilter drop\n");
2365 DDUMP(EthernetData, packet->data, packet->length);
2366 }
2367
2368 return drop;
2369 }
2370
2371 bool
2372 NSGigE::recvPacket(EthPacketPtr packet)
2373 {
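// Return convention (as interpreted from this model): 'true' means the
// packet has been consumed, even when it is silently dropped; 'false' is
// returned only on rxFifo overflow below, after posting ISR_RXORN, and
// presumably tells the link layer the packet was not accepted.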
2374 rxBytes += packet->length;
2375 rxPackets++;
2376
2377 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2378 rxFifo.avail());
2379
2380 if (!rxEnable) {
2381 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2382 return true;
2383 }
2384
2385 if (!rxFilterEnable) {
2386 DPRINTF(Ethernet,
2387 "receive packet filtering disabled . . . packet dropped\n");
2388 return true;
2389 }
2390
2391 if (rxFilter(packet)) {
2392 DPRINTF(Ethernet, "packet filtered...dropped\n");
2393 return true;
2394 }
2395
2396 if (rxFifo.avail() < packet->length) {
2397 #if TRACING_ON
2398 IpPtr ip(packet);
2399 TcpPtr tcp(ip);
2400 if (ip) {
2401 DPRINTF(Ethernet,
2402 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2403 ip->id());
2404 if (tcp) {
2405 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2406 }
2407 }
2408 #endif
2409 droppedPackets++;
2410 devIntrPost(ISR_RXORN);
2411 return false;
2412 }
2413
2414 rxFifo.push(packet);
2415
2416 rxKick();
2417 return true;
2418 }
2419
2420 //=====================================================================
2421 //
2422 // Serialization (checkpoint) support
2423 void
2424 NSGigE::serialize(ostream &os)
2425 {
2426 // Serialize the PciDev base class
2427 PciDev::serialize(os);
2428
2429 /*
2430 * Finalize any DMA events now.
2431 */
2432 // @todo will mem system save pending dma?
2433
2434 /*
2435 * Serialize the device registers
2436 */
2437 SERIALIZE_SCALAR(regs.command);
2438 SERIALIZE_SCALAR(regs.config);
2439 SERIALIZE_SCALAR(regs.mear);
2440 SERIALIZE_SCALAR(regs.ptscr);
2441 SERIALIZE_SCALAR(regs.isr);
2442 SERIALIZE_SCALAR(regs.imr);
2443 SERIALIZE_SCALAR(regs.ier);
2444 SERIALIZE_SCALAR(regs.ihr);
2445 SERIALIZE_SCALAR(regs.txdp);
2446 SERIALIZE_SCALAR(regs.txdp_hi);
2447 SERIALIZE_SCALAR(regs.txcfg);
2448 SERIALIZE_SCALAR(regs.gpior);
2449 SERIALIZE_SCALAR(regs.rxdp);
2450 SERIALIZE_SCALAR(regs.rxdp_hi);
2451 SERIALIZE_SCALAR(regs.rxcfg);
2452 SERIALIZE_SCALAR(regs.pqcr);
2453 SERIALIZE_SCALAR(regs.wcsr);
2454 SERIALIZE_SCALAR(regs.pcr);
2455 SERIALIZE_SCALAR(regs.rfcr);
2456 SERIALIZE_SCALAR(regs.rfdr);
2457 SERIALIZE_SCALAR(regs.brar);
2458 SERIALIZE_SCALAR(regs.brdr);
2459 SERIALIZE_SCALAR(regs.srr);
2460 SERIALIZE_SCALAR(regs.mibc);
2461 SERIALIZE_SCALAR(regs.vrcr);
2462 SERIALIZE_SCALAR(regs.vtcr);
2463 SERIALIZE_SCALAR(regs.vdr);
2464 SERIALIZE_SCALAR(regs.ccsr);
2465 SERIALIZE_SCALAR(regs.tbicr);
2466 SERIALIZE_SCALAR(regs.tbisr);
2467 SERIALIZE_SCALAR(regs.tanar);
2468 SERIALIZE_SCALAR(regs.tanlpar);
2469 SERIALIZE_SCALAR(regs.taner);
2470 SERIALIZE_SCALAR(regs.tesr);
2471
2472 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2473 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2474
2475 SERIALIZE_SCALAR(ioEnable);
2476
2477 /*
2478 * Serialize the data Fifos
2479 */
2480 rxFifo.serialize("rxFifo", os);
2481 txFifo.serialize("txFifo", os);
2482
2483 /*
2484 * Serialize the various helper variables
2485 */
2486 bool txPacketExists = txPacket;
2487 SERIALIZE_SCALAR(txPacketExists);
2488 if (txPacketExists) {
2489 txPacket->length = txPacketBufPtr - txPacket->data;
2490 txPacket->serialize("txPacket", os);
2491 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2492 SERIALIZE_SCALAR(txPktBufPtr);
2493 }
2494
2495 bool rxPacketExists = rxPacket;
2496 SERIALIZE_SCALAR(rxPacketExists);
2497 if (rxPacketExists) {
2498 rxPacket->serialize("rxPacket", os);
2499 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2500 SERIALIZE_SCALAR(rxPktBufPtr);
2501 }
2502
2503 SERIALIZE_SCALAR(txXferLen);
2504 SERIALIZE_SCALAR(rxXferLen);
2505
2506 /*
2507 * Serialize Cached Descriptors
2508 */
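// Both the 32-bit and 64-bit cached copies are written unconditionally;
// whichever format the device is currently using is simply restored
// alongside the unused one.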
2509 SERIALIZE_SCALAR(rxDesc64.link);
2510 SERIALIZE_SCALAR(rxDesc64.bufptr);
2511 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2512 SERIALIZE_SCALAR(rxDesc64.extsts);
2513 SERIALIZE_SCALAR(txDesc64.link);
2514 SERIALIZE_SCALAR(txDesc64.bufptr);
2515 SERIALIZE_SCALAR(txDesc64.cmdsts);
2516 SERIALIZE_SCALAR(txDesc64.extsts);
2517 SERIALIZE_SCALAR(rxDesc32.link);
2518 SERIALIZE_SCALAR(rxDesc32.bufptr);
2519 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2520 SERIALIZE_SCALAR(rxDesc32.extsts);
2521 SERIALIZE_SCALAR(txDesc32.link);
2522 SERIALIZE_SCALAR(txDesc32.bufptr);
2523 SERIALIZE_SCALAR(txDesc32.cmdsts);
2524 SERIALIZE_SCALAR(txDesc32.extsts);
2525 SERIALIZE_SCALAR(extstsEnable);
2526
2527 /*
2528 * Serialize tx state machine
2529 */
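// Enum-typed members are widened into local ints so SERIALIZE_SCALAR stores
// a plain integer; unserialize() casts them back to the enum types
// (TxState, DmaState, etc.).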
2530 int txState = this->txState;
2531 SERIALIZE_SCALAR(txState);
2532 SERIALIZE_SCALAR(txEnable);
2533 SERIALIZE_SCALAR(CTDD);
2534 SERIALIZE_SCALAR(txFragPtr);
2535 SERIALIZE_SCALAR(txDescCnt);
2536 int txDmaState = this->txDmaState;
2537 SERIALIZE_SCALAR(txDmaState);
2538 SERIALIZE_SCALAR(txKickTick);
2539
2540 /*
2541 * Serialize rx state machine
2542 */
2543 int rxState = this->rxState;
2544 SERIALIZE_SCALAR(rxState);
2545 SERIALIZE_SCALAR(rxEnable);
2546 SERIALIZE_SCALAR(CRDD);
2547 SERIALIZE_SCALAR(rxPktBytes);
2548 SERIALIZE_SCALAR(rxFragPtr);
2549 SERIALIZE_SCALAR(rxDescCnt);
2550 int rxDmaState = this->rxDmaState;
2551 SERIALIZE_SCALAR(rxDmaState);
2552 SERIALIZE_SCALAR(rxKickTick);
2553
2554 /*
2555 * Serialize EEPROM state machine
2556 */
2557 int eepromState = this->eepromState;
2558 SERIALIZE_SCALAR(eepromState);
2559 SERIALIZE_SCALAR(eepromClk);
2560 SERIALIZE_SCALAR(eepromBitsToRx);
2561 SERIALIZE_SCALAR(eepromOpcode);
2562 SERIALIZE_SCALAR(eepromAddress);
2563 SERIALIZE_SCALAR(eepromData);
2564
2565 /*
2566 * If there's a pending transmit, store the time so we can
2567 * reschedule it later
2568 */
2569 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2570 SERIALIZE_SCALAR(transmitTick);
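// Note the delta is stored rather than the absolute tick, so unserialize()
// can recreate the event as curTick + transmitTick on the restored timeline;
// zero means no transmit was pending.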
2571
2572 /*
2573 * receive address filter settings
2574 */
2575 SERIALIZE_SCALAR(rxFilterEnable);
2576 SERIALIZE_SCALAR(acceptBroadcast);
2577 SERIALIZE_SCALAR(acceptMulticast);
2578 SERIALIZE_SCALAR(acceptUnicast);
2579 SERIALIZE_SCALAR(acceptPerfect);
2580 SERIALIZE_SCALAR(acceptArp);
2581 SERIALIZE_SCALAR(multicastHashEnable);
2582
2583 /*
2584 * Keep track of pending interrupt status.
2585 */
2586 SERIALIZE_SCALAR(intrTick);
2587 SERIALIZE_SCALAR(cpuPendingIntr);
2588 Tick intrEventTick = 0;
2589 if (intrEvent)
2590 intrEventTick = intrEvent->when();
2591 SERIALIZE_SCALAR(intrEventTick);
2592
2593 }
2594
2595 void
2596 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2597 {
2598 // Unserialize the PciDev base class
2599 PciDev::unserialize(cp, section);
2600
2601 UNSERIALIZE_SCALAR(regs.command);
2602 UNSERIALIZE_SCALAR(regs.config);
2603 UNSERIALIZE_SCALAR(regs.mear);
2604 UNSERIALIZE_SCALAR(regs.ptscr);
2605 UNSERIALIZE_SCALAR(regs.isr);
2606 UNSERIALIZE_SCALAR(regs.imr);
2607 UNSERIALIZE_SCALAR(regs.ier);
2608 UNSERIALIZE_SCALAR(regs.ihr);
2609 UNSERIALIZE_SCALAR(regs.txdp);
2610 UNSERIALIZE_SCALAR(regs.txdp_hi);
2611 UNSERIALIZE_SCALAR(regs.txcfg);
2612 UNSERIALIZE_SCALAR(regs.gpior);
2613 UNSERIALIZE_SCALAR(regs.rxdp);
2614 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2615 UNSERIALIZE_SCALAR(regs.rxcfg);
2616 UNSERIALIZE_SCALAR(regs.pqcr);
2617 UNSERIALIZE_SCALAR(regs.wcsr);
2618 UNSERIALIZE_SCALAR(regs.pcr);
2619 UNSERIALIZE_SCALAR(regs.rfcr);
2620 UNSERIALIZE_SCALAR(regs.rfdr);
2621 UNSERIALIZE_SCALAR(regs.brar);
2622 UNSERIALIZE_SCALAR(regs.brdr);
2623 UNSERIALIZE_SCALAR(regs.srr);
2624 UNSERIALIZE_SCALAR(regs.mibc);
2625 UNSERIALIZE_SCALAR(regs.vrcr);
2626 UNSERIALIZE_SCALAR(regs.vtcr);
2627 UNSERIALIZE_SCALAR(regs.vdr);
2628 UNSERIALIZE_SCALAR(regs.ccsr);
2629 UNSERIALIZE_SCALAR(regs.tbicr);
2630 UNSERIALIZE_SCALAR(regs.tbisr);
2631 UNSERIALIZE_SCALAR(regs.tanar);
2632 UNSERIALIZE_SCALAR(regs.tanlpar);
2633 UNSERIALIZE_SCALAR(regs.taner);
2634 UNSERIALIZE_SCALAR(regs.tesr);
2635
2636 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2637 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2638
2639 UNSERIALIZE_SCALAR(ioEnable);
2640
2641 /*
2642 * unserialize the data fifos
2643 */
2644 rxFifo.unserialize("rxFifo", cp, section);
2645 txFifo.unserialize("txFifo", cp, section);
2646
2647 /*
2648 * unserialize the various helper variables
2649 */
2650 bool txPacketExists;
2651 UNSERIALIZE_SCALAR(txPacketExists);
2652 if (txPacketExists) {
2653 txPacket = new EthPacketData(16384);
2654 txPacket->unserialize("txPacket", cp, section);
2655 uint32_t txPktBufPtr;
2656 UNSERIALIZE_SCALAR(txPktBufPtr);
2657 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2658 } else
2659 txPacket = 0;
2660
2661 bool rxPacketExists;
2662 UNSERIALIZE_SCALAR(rxPacketExists);
2663 rxPacket = 0;
2664 if (rxPacketExists) {
2665 rxPacket = new EthPacketData(16384);
2666 rxPacket->unserialize("rxPacket", cp, section);
2667 uint32_t rxPktBufPtr;
2668 UNSERIALIZE_SCALAR(rxPktBufPtr);
2669 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2670 } else
2671 rxPacket = 0;
2672
2673 UNSERIALIZE_SCALAR(txXferLen);
2674 UNSERIALIZE_SCALAR(rxXferLen);
2675
2676 /*
2677 * Unserialize Cached Descriptors
2678 */
2679 UNSERIALIZE_SCALAR(rxDesc64.link);
2680 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2681 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2682 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2683 UNSERIALIZE_SCALAR(txDesc64.link);
2684 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2685 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2686 UNSERIALIZE_SCALAR(txDesc64.extsts);
2687 UNSERIALIZE_SCALAR(rxDesc32.link);
2688 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2689 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2690 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2691 UNSERIALIZE_SCALAR(txDesc32.link);
2692 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2693 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2694 UNSERIALIZE_SCALAR(txDesc32.extsts);
2695 UNSERIALIZE_SCALAR(extstsEnable);
2696
2697 /*
2698 * unserialize tx state machine
2699 */
2700 int txState;
2701 UNSERIALIZE_SCALAR(txState);
2702 this->txState = (TxState) txState;
2703 UNSERIALIZE_SCALAR(txEnable);
2704 UNSERIALIZE_SCALAR(CTDD);
2705 UNSERIALIZE_SCALAR(txFragPtr);
2706 UNSERIALIZE_SCALAR(txDescCnt);
2707 int txDmaState;
2708 UNSERIALIZE_SCALAR(txDmaState);
2709 this->txDmaState = (DmaState) txDmaState;
2710 UNSERIALIZE_SCALAR(txKickTick);
2711 if (txKickTick)
2712 txKickEvent.schedule(txKickTick);
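// txKickTick is saved as an absolute tick, so the kick event is simply
// rescheduled at that time when it was pending (non-zero); the same pattern
// is used for rxKickTick below.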
2713
2714 /*
2715 * unserialize rx state machine
2716 */
2717 int rxState;
2718 UNSERIALIZE_SCALAR(rxState);
2719 this->rxState = (RxState) rxState;
2720 UNSERIALIZE_SCALAR(rxEnable);
2721 UNSERIALIZE_SCALAR(CRDD);
2722 UNSERIALIZE_SCALAR(rxPktBytes);
2723 UNSERIALIZE_SCALAR(rxFragPtr);
2724 UNSERIALIZE_SCALAR(rxDescCnt);
2725 int rxDmaState;
2726 UNSERIALIZE_SCALAR(rxDmaState);
2727 this->rxDmaState = (DmaState) rxDmaState;
2728 UNSERIALIZE_SCALAR(rxKickTick);
2729 if (rxKickTick)
2730 rxKickEvent.schedule(rxKickTick);
2731
2732 /*
2733 * Unserialize EEPROM state machine
2734 */
2735 int eepromState;
2736 UNSERIALIZE_SCALAR(eepromState);
2737 this->eepromState = (EEPROMState) eepromState;
2738 UNSERIALIZE_SCALAR(eepromClk);
2739 UNSERIALIZE_SCALAR(eepromBitsToRx);
2740 UNSERIALIZE_SCALAR(eepromOpcode);
2741 UNSERIALIZE_SCALAR(eepromAddress);
2742 UNSERIALIZE_SCALAR(eepromData);
2743
2744 /*
2745 * If there's a pending transmit, reschedule it now
2746 */
2747 Tick transmitTick;
2748 UNSERIALIZE_SCALAR(transmitTick);
2749 if (transmitTick)
2750 txEvent.schedule(curTick + transmitTick);
2751
2752 /*
2753 * unserialize receive address filter settings
2754 */
2755 UNSERIALIZE_SCALAR(rxFilterEnable);
2756 UNSERIALIZE_SCALAR(acceptBroadcast);
2757 UNSERIALIZE_SCALAR(acceptMulticast);
2758 UNSERIALIZE_SCALAR(acceptUnicast);
2759 UNSERIALIZE_SCALAR(acceptPerfect);
2760 UNSERIALIZE_SCALAR(acceptArp);
2761 UNSERIALIZE_SCALAR(multicastHashEnable);
2762
2763 /*
2764 * Keep track of pending interrupt status.
2765 */
2766 UNSERIALIZE_SCALAR(intrTick);
2767 UNSERIALIZE_SCALAR(cpuPendingIntr);
2768 Tick intrEventTick;
2769 UNSERIALIZE_SCALAR(intrEventTick);
2770 if (intrEventTick) {
2771 intrEvent = new IntrEvent(this, true);
2772 intrEvent->schedule(intrEventTick);
2773 }
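// serialize() writes a zero tick when no interrupt event was pending, so
// nothing is recreated in that case.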
2774 }
2775
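// The macros below (from sim/builder.hh) generate the parameter-declaration
// and factory glue that lets the simulator's configuration system construct
// these objects by name; CREATE_SIM_OBJECT is, roughly speaking, the body of
// the generated factory function.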
2776 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2777
2778 SimObjectParam<EtherInt *> peer;
2779 SimObjectParam<NSGigE *> device;
2780
2781 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2782
2783 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2784
2785 INIT_PARAM_DFLT(peer, "peer interface", NULL),
2786 INIT_PARAM(device, "Ethernet device of this interface")
2787
2788 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2789
2790 CREATE_SIM_OBJECT(NSGigEInt)
2791 {
2792 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2793
2794 EtherInt *p = (EtherInt *)peer;
2795 if (p) {
2796 dev_int->setPeer(p);
2797 p->setPeer(dev_int);
2798 }
2799
2800 return dev_int;
2801 }
2802
2803 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2804
2805
2806 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2807
2808 SimObjectParam<System *> system;
2809 SimObjectParam<Platform *> platform;
2810 SimObjectParam<PciConfigAll *> configspace;
2811 SimObjectParam<PciConfigData *> configdata;
2812 Param<uint32_t> pci_bus;
2813 Param<uint32_t> pci_dev;
2814 Param<uint32_t> pci_func;
2815 Param<Tick> pio_latency;
2816
2817 Param<Tick> clock;
2818 Param<bool> dma_desc_free;
2819 Param<bool> dma_data_free;
2820 Param<Tick> dma_read_delay;
2821 Param<Tick> dma_write_delay;
2822 Param<Tick> dma_read_factor;
2823 Param<Tick> dma_write_factor;
2824 Param<bool> dma_no_allocate;
2825 Param<Tick> intr_delay;
2826
2827 Param<Tick> rx_delay;
2828 Param<Tick> tx_delay;
2829 Param<uint32_t> rx_fifo_size;
2830 Param<uint32_t> tx_fifo_size;
2831
2832 Param<bool> rx_filter;
2833 Param<string> hardware_address;
2834 Param<bool> rx_thread;
2835 Param<bool> tx_thread;
2836 Param<bool> rss;
2837
2838 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2839
2840 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
2841
2842 INIT_PARAM(system, "System pointer"),
2843 INIT_PARAM(platform, "Platform pointer"),
2844 INIT_PARAM(configspace, "PCI Configspace"),
2845 INIT_PARAM(configdata, "PCI Config data"),
2846 INIT_PARAM(pci_bus, "PCI bus ID"),
2847 INIT_PARAM(pci_dev, "PCI device number"),
2848 INIT_PARAM(pci_func, "PCI function code"),
2849 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
2850 INIT_PARAM(clock, "State machine cycle time"),
2851
2852 INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
2853 INIT_PARAM(dma_data_free, "DMA of Data is free"),
2854 INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
2855 INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
2856 INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
2857 INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
2858 INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
2859 INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
2860
2861 INIT_PARAM(rx_delay, "Receive Delay"),
2862 INIT_PARAM(tx_delay, "Transmit Delay"),
2863 INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
2864 INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),
2865
2866 INIT_PARAM(rx_filter, "Enable Receive Filter"),
2867 INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
2868 INIT_PARAM(rx_thread, ""),
2869 INIT_PARAM(tx_thread, ""),
2870 INIT_PARAM(rss, "")
2871
2872 END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2873
2874
2875 CREATE_SIM_OBJECT(NSGigE)
2876 {
2877 NSGigE::Params *params = new NSGigE::Params;
2878
2879 params->name = getInstanceName();
2880 params->platform = platform;
2881 params->system = system;
2882 params->configSpace = configspace;
2883 params->configData = configdata;
2884 params->busNum = pci_bus;
2885 params->deviceNum = pci_dev;
2886 params->functionNum = pci_func;
2887 params->pio_delay = pio_latency;
2888
2889 params->clock = clock;
2890 params->dma_desc_free = dma_desc_free;
2891 params->dma_data_free = dma_data_free;
2892 params->dma_read_delay = dma_read_delay;
2893 params->dma_write_delay = dma_write_delay;
2894 params->dma_read_factor = dma_read_factor;
2895 params->dma_write_factor = dma_write_factor;
2896 params->dma_no_allocate = dma_no_allocate;
2898 params->intr_delay = intr_delay;
2899
2900 params->rx_delay = rx_delay;
2901 params->tx_delay = tx_delay;
2902 params->rx_fifo_size = rx_fifo_size;
2903 params->tx_fifo_size = tx_fifo_size;
2904
2905 params->rx_filter = rx_filter;
2906 params->eaddr = hardware_address;
2907 params->rx_thread = rx_thread;
2908 params->tx_thread = tx_thread;
2909 params->rss = rss;
2910
2911 return new NSGigE(params);
2912 }
2913
2914 REGISTER_SIM_OBJECT("NSGigE", NSGigE)