1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Lisa Hsu
29 */
30
/** @file
 * Device module for modelling the National Semiconductor
 * DP83820 ethernet controller. Does not support priority queueing.
 */
35 #include <deque>
36 #include <string>
37
38 #include "arch/alpha/ev5.hh"
39 #include "base/inet.hh"
40 #include "cpu/exec_context.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/packet.hh"
45 #include "sim/builder.hh"
46 #include "sim/debug.hh"
47 #include "sim/host.hh"
48 #include "sim/stats.hh"
49 #include "sim/system.hh"
50
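// Human-readable names for the receive, transmit, and DMA state machine
// states; indexed by the corresponding state enums and used only for
// DPRINTF trace output below.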
51 const char *NsRxStateStrings[] =
52 {
53 "rxIdle",
54 "rxDescRefr",
55 "rxDescRead",
56 "rxFifoBlock",
57 "rxFragWrite",
58 "rxDescWrite",
59 "rxAdvance"
60 };
61
62 const char *NsTxStateStrings[] =
63 {
64 "txIdle",
65 "txDescRefr",
66 "txDescRead",
67 "txFifoBlock",
68 "txFragRead",
69 "txDescWrite",
70 "txAdvance"
71 };
72
73 const char *NsDmaState[] =
74 {
75 "dmaIdle",
76 "dmaReading",
77 "dmaWriting",
78 "dmaReadWaiting",
79 "dmaWriteWaiting"
80 };
81
82 using namespace std;
83 using namespace Net;
84 using namespace TheISA;
85
86 ///////////////////////////////////////////////////////////////////////
87 //
88 // NSGigE PCI Device
89 //
90 NSGigE::NSGigE(Params *p)
91 : PciDev(p), ioEnable(false),
92 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
93 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
94 txXferLen(0), rxXferLen(0), clock(p->clock),
95 txState(txIdle), txEnable(false), CTDD(false),
96 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
97 rxEnable(false), CRDD(false), rxPktBytes(0),
98 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
99 eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
100 txDmaReadEvent(this), txDmaWriteEvent(this),
101 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
102 txDelay(p->tx_delay), rxDelay(p->rx_delay),
103 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
104 txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
105 acceptMulticast(false), acceptUnicast(false),
106 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
107 intrTick(0), cpuPendingIntr(false),
108 intrEvent(0), interface(0)
109 {
110
111 intrDelay = p->intr_delay;
112 dmaReadDelay = p->dma_read_delay;
113 dmaWriteDelay = p->dma_write_delay;
114 dmaReadFactor = p->dma_read_factor;
115 dmaWriteFactor = p->dma_write_factor;
116
117 regsReset();
118 memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
119
120 memset(&rxDesc32, 0, sizeof(rxDesc32));
121 memset(&txDesc32, 0, sizeof(txDesc32));
122 memset(&rxDesc64, 0, sizeof(rxDesc64));
123 memset(&txDesc64, 0, sizeof(txDesc64));
124 }
125
126 NSGigE::~NSGigE()
127 {}
128
129 void
130 NSGigE::regStats()
131 {
132 txBytes
133 .name(name() + ".txBytes")
134 .desc("Bytes Transmitted")
135 .prereq(txBytes)
136 ;
137
138 rxBytes
139 .name(name() + ".rxBytes")
140 .desc("Bytes Received")
141 .prereq(rxBytes)
142 ;
143
144 txPackets
145 .name(name() + ".txPackets")
146 .desc("Number of Packets Transmitted")
147 .prereq(txBytes)
148 ;
149
150 rxPackets
151 .name(name() + ".rxPackets")
152 .desc("Number of Packets Received")
153 .prereq(rxBytes)
154 ;
155
156 txIpChecksums
157 .name(name() + ".txIpChecksums")
158 .desc("Number of tx IP Checksums done by device")
159 .precision(0)
160 .prereq(txBytes)
161 ;
162
163 rxIpChecksums
164 .name(name() + ".rxIpChecksums")
165 .desc("Number of rx IP Checksums done by device")
166 .precision(0)
167 .prereq(rxBytes)
168 ;
169
170 txTcpChecksums
171 .name(name() + ".txTcpChecksums")
172 .desc("Number of tx TCP Checksums done by device")
173 .precision(0)
174 .prereq(txBytes)
175 ;
176
177 rxTcpChecksums
178 .name(name() + ".rxTcpChecksums")
179 .desc("Number of rx TCP Checksums done by device")
180 .precision(0)
181 .prereq(rxBytes)
182 ;
183
184 txUdpChecksums
185 .name(name() + ".txUdpChecksums")
186 .desc("Number of tx UDP Checksums done by device")
187 .precision(0)
188 .prereq(txBytes)
189 ;
190
191 rxUdpChecksums
192 .name(name() + ".rxUdpChecksums")
193 .desc("Number of rx UDP Checksums done by device")
194 .precision(0)
195 .prereq(rxBytes)
196 ;
197
198 descDmaReads
199 .name(name() + ".descDMAReads")
200 .desc("Number of descriptors the device read w/ DMA")
201 .precision(0)
202 ;
203
204 descDmaWrites
205 .name(name() + ".descDMAWrites")
206 .desc("Number of descriptors the device wrote w/ DMA")
207 .precision(0)
208 ;
209
210 descDmaRdBytes
211 .name(name() + ".descDmaReadBytes")
212 .desc("number of descriptor bytes read w/ DMA")
213 .precision(0)
214 ;
215
216 descDmaWrBytes
217 .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes written w/ DMA")
219 .precision(0)
220 ;
221
222 txBandwidth
223 .name(name() + ".txBandwidth")
224 .desc("Transmit Bandwidth (bits/s)")
225 .precision(0)
226 .prereq(txBytes)
227 ;
228
229 rxBandwidth
230 .name(name() + ".rxBandwidth")
231 .desc("Receive Bandwidth (bits/s)")
232 .precision(0)
233 .prereq(rxBytes)
234 ;
235
236 totBandwidth
237 .name(name() + ".totBandwidth")
238 .desc("Total Bandwidth (bits/s)")
239 .precision(0)
240 .prereq(totBytes)
241 ;
242
243 totPackets
244 .name(name() + ".totPackets")
245 .desc("Total Packets")
246 .precision(0)
247 .prereq(totBytes)
248 ;
249
250 totBytes
251 .name(name() + ".totBytes")
252 .desc("Total Bytes")
253 .precision(0)
254 .prereq(totBytes)
255 ;
256
257 totPacketRate
258 .name(name() + ".totPPS")
        .desc("Total Transmission Rate (packets/s)")
260 .precision(0)
261 .prereq(totBytes)
262 ;
263
264 txPacketRate
265 .name(name() + ".txPPS")
        .desc("Packet Transmission Rate (packets/s)")
267 .precision(0)
268 .prereq(txBytes)
269 ;
270
271 rxPacketRate
272 .name(name() + ".rxPPS")
273 .desc("Packet Reception Rate (packets/s)")
274 .precision(0)
275 .prereq(rxBytes)
276 ;
277
278 postedSwi
279 .name(name() + ".postedSwi")
280 .desc("number of software interrupts posted to CPU")
281 .precision(0)
282 ;
283
284 totalSwi
285 .name(name() + ".totalSwi")
286 .desc("total number of Swi written to ISR")
287 .precision(0)
288 ;
289
290 coalescedSwi
291 .name(name() + ".coalescedSwi")
292 .desc("average number of Swi's coalesced into each post")
293 .precision(0)
294 ;
295
296 postedRxIdle
297 .name(name() + ".postedRxIdle")
298 .desc("number of rxIdle interrupts posted to CPU")
299 .precision(0)
300 ;
301
302 totalRxIdle
303 .name(name() + ".totalRxIdle")
304 .desc("total number of RxIdle written to ISR")
305 .precision(0)
306 ;
307
308 coalescedRxIdle
309 .name(name() + ".coalescedRxIdle")
310 .desc("average number of RxIdle's coalesced into each post")
311 .precision(0)
312 ;
313
314 postedRxOk
315 .name(name() + ".postedRxOk")
316 .desc("number of RxOk interrupts posted to CPU")
317 .precision(0)
318 ;
319
320 totalRxOk
321 .name(name() + ".totalRxOk")
322 .desc("total number of RxOk written to ISR")
323 .precision(0)
324 ;
325
326 coalescedRxOk
327 .name(name() + ".coalescedRxOk")
328 .desc("average number of RxOk's coalesced into each post")
329 .precision(0)
330 ;
331
332 postedRxDesc
333 .name(name() + ".postedRxDesc")
334 .desc("number of RxDesc interrupts posted to CPU")
335 .precision(0)
336 ;
337
338 totalRxDesc
339 .name(name() + ".totalRxDesc")
340 .desc("total number of RxDesc written to ISR")
341 .precision(0)
342 ;
343
344 coalescedRxDesc
345 .name(name() + ".coalescedRxDesc")
346 .desc("average number of RxDesc's coalesced into each post")
347 .precision(0)
348 ;
349
350 postedTxOk
351 .name(name() + ".postedTxOk")
352 .desc("number of TxOk interrupts posted to CPU")
353 .precision(0)
354 ;
355
356 totalTxOk
357 .name(name() + ".totalTxOk")
358 .desc("total number of TxOk written to ISR")
359 .precision(0)
360 ;
361
362 coalescedTxOk
363 .name(name() + ".coalescedTxOk")
364 .desc("average number of TxOk's coalesced into each post")
365 .precision(0)
366 ;
367
368 postedTxIdle
369 .name(name() + ".postedTxIdle")
370 .desc("number of TxIdle interrupts posted to CPU")
371 .precision(0)
372 ;
373
374 totalTxIdle
375 .name(name() + ".totalTxIdle")
376 .desc("total number of TxIdle written to ISR")
377 .precision(0)
378 ;
379
380 coalescedTxIdle
381 .name(name() + ".coalescedTxIdle")
382 .desc("average number of TxIdle's coalesced into each post")
383 .precision(0)
384 ;
385
386 postedTxDesc
387 .name(name() + ".postedTxDesc")
388 .desc("number of TxDesc interrupts posted to CPU")
389 .precision(0)
390 ;
391
392 totalTxDesc
393 .name(name() + ".totalTxDesc")
394 .desc("total number of TxDesc written to ISR")
395 .precision(0)
396 ;
397
398 coalescedTxDesc
399 .name(name() + ".coalescedTxDesc")
400 .desc("average number of TxDesc's coalesced into each post")
401 .precision(0)
402 ;
403
404 postedRxOrn
405 .name(name() + ".postedRxOrn")
406 .desc("number of RxOrn posted to CPU")
407 .precision(0)
408 ;
409
410 totalRxOrn
411 .name(name() + ".totalRxOrn")
412 .desc("total number of RxOrn written to ISR")
413 .precision(0)
414 ;
415
416 coalescedRxOrn
417 .name(name() + ".coalescedRxOrn")
418 .desc("average number of RxOrn's coalesced into each post")
419 .precision(0)
420 ;
421
422 coalescedTotal
423 .name(name() + ".coalescedTotal")
424 .desc("average number of interrupts coalesced into each post")
425 .precision(0)
426 ;
427
428 postedInterrupts
429 .name(name() + ".postedInterrupts")
430 .desc("number of posts to CPU")
431 .precision(0)
432 ;
433
434 droppedPackets
435 .name(name() + ".droppedPackets")
436 .desc("number of packets dropped")
437 .precision(0)
438 ;
439
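    // The remaining stats are formulas computed from the counters registered
    // above; they are evaluated from the accumulated counts when statistics
    // are dumped rather than updated on every event.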
440 coalescedSwi = totalSwi / postedInterrupts;
441 coalescedRxIdle = totalRxIdle / postedInterrupts;
442 coalescedRxOk = totalRxOk / postedInterrupts;
443 coalescedRxDesc = totalRxDesc / postedInterrupts;
444 coalescedTxOk = totalTxOk / postedInterrupts;
445 coalescedTxIdle = totalTxIdle / postedInterrupts;
446 coalescedTxDesc = totalTxDesc / postedInterrupts;
447 coalescedRxOrn = totalRxOrn / postedInterrupts;
448
449 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
450 totalTxOk + totalTxIdle + totalTxDesc +
451 totalRxOrn) / postedInterrupts;
452
453 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
454 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
455 totBandwidth = txBandwidth + rxBandwidth;
456 totBytes = txBytes + rxBytes;
457 totPackets = txPackets + rxPackets;
458
459 txPacketRate = txPackets / simSeconds;
460 rxPacketRate = rxPackets / simSeconds;
461 }
462
463
464 /**
465 * This is to write to the PCI general configuration registers
466 */
467 void
468 NSGigE::writeConfig(int offset, const uint16_t data)
469 {
470 if (offset < PCI_DEVICE_SPECIFIC)
471 PciDev::writeConfig(offset, data);
472 else
473 panic("Device specific PCI config space not implemented!\n");
474
475 switch (offset) {
      // The device seems to work fine without all these PCI settings, but
      // the IO enable bit is tracked here as a double check; an assertion
      // will fail if it ever needs to be implemented properly.
479 case PCI_COMMAND:
480 if (config.data[offset] & PCI_CMD_IOSE)
481 ioEnable = true;
482 else
483 ioEnable = false;
484 break;
485 }
486 }
487
488 /**
489 * This reads the device registers, which are detailed in the NS83820
490 * spec sheet
491 */
492 Tick
493 NSGigE::read(Packet *pkt)
494 {
495 assert(ioEnable);
496
497 pkt->allocate();
498
499 //The mask is to give you only the offset into the device register file
500 Addr daddr = pkt->getAddr() & 0xfff;
501 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
502 daddr, pkt->getAddr(), pkt->getSize());
503
504
505 // there are some reserved registers, you can see ns_gige_reg.h and
506 // the spec sheet for details
507 if (daddr > LAST && daddr <= RESERVED) {
508 panic("Accessing reserved register");
509 } else if (daddr > RESERVED && daddr <= 0x3FC) {
510 if (pkt->getSize() == sizeof(uint8_t))
511 readConfig(daddr & 0xff, pkt->getPtr<uint8_t>());
512 if (pkt->getSize() == sizeof(uint16_t))
513 readConfig(daddr & 0xff, pkt->getPtr<uint16_t>());
514 if (pkt->getSize() == sizeof(uint32_t))
515 readConfig(daddr & 0xff, pkt->getPtr<uint32_t>());
516 pkt->result = Packet::Success;
517 return pioDelay;
518 } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // We don't implement all the MIBs; hopefully the kernel doesn't
        // actually depend upon their values.
        // The MIBs are just hardware statistics counters.
522 pkt->set<uint32_t>(0);
523 pkt->result = Packet::Success;
524 return pioDelay;
525 } else if (daddr > 0x3FC)
526 panic("Something is messed up!\n");
527
528 assert(pkt->getSize() == sizeof(uint32_t));
529 uint32_t &reg = *pkt->getPtr<uint32_t>();
530 uint16_t rfaddr;
531
532 switch (daddr) {
533 case CR:
534 reg = regs.command;
535 //these are supposed to be cleared on a read
536 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
537 break;
538
539 case CFGR:
540 reg = regs.config;
541 break;
542
543 case MEAR:
544 reg = regs.mear;
545 break;
546
547 case PTSCR:
548 reg = regs.ptscr;
549 break;
550
551 case ISR:
552 reg = regs.isr;
553 devIntrClear(ISR_ALL);
554 break;
555
556 case IMR:
557 reg = regs.imr;
558 break;
559
560 case IER:
561 reg = regs.ier;
562 break;
563
564 case IHR:
565 reg = regs.ihr;
566 break;
567
568 case TXDP:
569 reg = regs.txdp;
570 break;
571
572 case TXDP_HI:
573 reg = regs.txdp_hi;
574 break;
575
576 case TX_CFG:
577 reg = regs.txcfg;
578 break;
579
580 case GPIOR:
581 reg = regs.gpior;
582 break;
583
584 case RXDP:
585 reg = regs.rxdp;
586 break;
587
588 case RXDP_HI:
589 reg = regs.rxdp_hi;
590 break;
591
592 case RX_CFG:
593 reg = regs.rxcfg;
594 break;
595
596 case PQCR:
597 reg = regs.pqcr;
598 break;
599
600 case WCSR:
601 reg = regs.wcsr;
602 break;
603
604 case PCR:
605 reg = regs.pcr;
606 break;
607
      // See the spec sheet for how RFCR and RFDR work. Basically, you write
      // to RFCR to tell the device which filter entry you want to access
      // next, and then you act upon RFDR; the device knows what to do
      // because of what was written to RFCR.
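      // For example, the driver writes 0x000, 0x002, or 0x004 into the
      // RFADDR field of RFCR and then reads RFDR to get successive 16-bit
      // chunks of the perfect-match (station MAC) address, as handled below.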
613 case RFCR:
614 reg = regs.rfcr;
615 break;
616
617 case RFDR:
618 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
619 switch (rfaddr) {
620 // Read from perfect match ROM octets
621 case 0x000:
622 reg = rom.perfectMatch[1];
623 reg = reg << 8;
624 reg += rom.perfectMatch[0];
625 break;
626 case 0x002:
627 reg = rom.perfectMatch[3] << 8;
628 reg += rom.perfectMatch[2];
629 break;
630 case 0x004:
631 reg = rom.perfectMatch[5] << 8;
632 reg += rom.perfectMatch[4];
633 break;
634 default:
635 // Read filter hash table
636 if (rfaddr >= FHASH_ADDR &&
637 rfaddr < FHASH_ADDR + FHASH_SIZE) {
638
639 // Only word-aligned reads supported
640 if (rfaddr % 2)
641 panic("unaligned read from filter hash table!");
642
643 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
644 reg += rom.filterHash[rfaddr - FHASH_ADDR];
645 break;
646 }
647
648 panic("reading RFDR for something other than pattern"
649 " matching or hashing! %#x\n", rfaddr);
650 }
651 break;
652
653 case SRR:
654 reg = regs.srr;
655 break;
656
657 case MIBC:
658 reg = regs.mibc;
659 reg &= ~(MIBC_MIBS | MIBC_ACLR);
660 break;
661
662 case VRCR:
663 reg = regs.vrcr;
664 break;
665
666 case VTCR:
667 reg = regs.vtcr;
668 break;
669
670 case VDR:
671 reg = regs.vdr;
672 break;
673
674 case CCSR:
675 reg = regs.ccsr;
676 break;
677
678 case TBICR:
679 reg = regs.tbicr;
680 break;
681
682 case TBISR:
683 reg = regs.tbisr;
684 break;
685
686 case TANAR:
687 reg = regs.tanar;
688 break;
689
690 case TANLPAR:
691 reg = regs.tanlpar;
692 break;
693
694 case TANER:
695 reg = regs.taner;
696 break;
697
698 case TESR:
699 reg = regs.tesr;
700 break;
701
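      // M5REG is a simulator-specific register (not part of the real
      // DP83820) that lets the guest driver discover which m5 acceleration
      // features (separate rx/tx threads, RSS) were configured for this
      // device.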
702 case M5REG:
703 reg = 0;
704 if (params()->rx_thread)
705 reg |= M5REG_RX_THREAD;
706 if (params()->tx_thread)
707 reg |= M5REG_TX_THREAD;
708 if (params()->rss)
709 reg |= M5REG_RSS;
710 break;
711
712 default:
713 panic("reading unimplemented register: addr=%#x", daddr);
714 }
715
716 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
717 daddr, reg, reg);
718
719 pkt->result = Packet::Success;
720 return pioDelay;
721 }
722
723 Tick
724 NSGigE::write(Packet *pkt)
725 {
726 assert(ioEnable);
727
728 Addr daddr = pkt->getAddr() & 0xfff;
729 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
730 daddr, pkt->getAddr(), pkt->getSize());
731
732 if (daddr > LAST && daddr <= RESERVED) {
733 panic("Accessing reserved register");
734 } else if (daddr > RESERVED && daddr <= 0x3FC) {
735 if (pkt->getSize() == sizeof(uint8_t))
736 writeConfig(daddr & 0xff, pkt->get<uint8_t>());
737 if (pkt->getSize() == sizeof(uint16_t))
738 writeConfig(daddr & 0xff, pkt->get<uint16_t>());
739 if (pkt->getSize() == sizeof(uint32_t))
740 writeConfig(daddr & 0xff, pkt->get<uint32_t>());
741 pkt->result = Packet::Success;
742 return pioDelay;
743 } else if (daddr > 0x3FC)
744 panic("Something is messed up!\n");
745
746 if (pkt->getSize() == sizeof(uint32_t)) {
747 uint32_t reg = pkt->get<uint32_t>();
748 uint16_t rfaddr;
749
750 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
751
752 switch (daddr) {
753 case CR:
754 regs.command = reg;
755 if (reg & CR_TXD) {
756 txEnable = false;
757 } else if (reg & CR_TXE) {
758 txEnable = true;
759
760 // the kernel is enabling the transmit machine
761 if (txState == txIdle)
762 txKick();
763 }
764
765 if (reg & CR_RXD) {
766 rxEnable = false;
767 } else if (reg & CR_RXE) {
768 rxEnable = true;
769
770 if (rxState == rxIdle)
771 rxKick();
772 }
773
774 if (reg & CR_TXR)
775 txReset();
776
777 if (reg & CR_RXR)
778 rxReset();
779
780 if (reg & CR_SWI)
781 devIntrPost(ISR_SWI);
782
783 if (reg & CR_RST) {
784 txReset();
785 rxReset();
786
787 regsReset();
788 }
789 break;
790
791 case CFGR:
            // The link/speed/duplex status bits, the reserved bits,
            // CFGR_T64ADDR, and CFGR_PCI64_DET are read only; whatever the
            // driver writes to them is masked off below.

            // First clear all writable bits
            regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                CFGR_RESERVED | CFGR_T64ADDR |
                CFGR_PCI64_DET;
            // Now set the appropriate writable bits
            regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
                                   CFGR_RESERVED | CFGR_T64ADDR |
                                   CFGR_PCI64_DET);
807
            // All of these no-op checks (and the #if 0 blocks below) exist
            // because the kernel isn't expected to need these features
            // implemented. If a problem relating to one of them shows up,
            // the functionality may need to be added.
811 if (reg & CFGR_TBI_EN) ;
812 if (reg & CFGR_MODE_1000) ;
813
814 if (reg & CFGR_AUTO_1000)
815 panic("CFGR_AUTO_1000 not implemented!\n");
816
817 if (reg & CFGR_PINT_DUPSTS ||
818 reg & CFGR_PINT_LNKSTS ||
819 reg & CFGR_PINT_SPDSTS)
820 ;
821
822 if (reg & CFGR_TMRTEST) ;
823 if (reg & CFGR_MRM_DIS) ;
824 if (reg & CFGR_MWI_DIS) ;
825
826 if (reg & CFGR_T64ADDR) ;
827 // panic("CFGR_T64ADDR is read only register!\n");
828
829 if (reg & CFGR_PCI64_DET)
830 panic("CFGR_PCI64_DET is read only register!\n");
831
832 if (reg & CFGR_DATA64_EN) ;
833 if (reg & CFGR_M64ADDR) ;
834 if (reg & CFGR_PHY_RST) ;
835 if (reg & CFGR_PHY_DIS) ;
836
837 if (reg & CFGR_EXTSTS_EN)
838 extstsEnable = true;
839 else
840 extstsEnable = false;
841
842 if (reg & CFGR_REQALG) ;
843 if (reg & CFGR_SB) ;
844 if (reg & CFGR_POW) ;
845 if (reg & CFGR_EXD) ;
846 if (reg & CFGR_PESEL) ;
847 if (reg & CFGR_BROM_DIS) ;
848 if (reg & CFGR_EXT_125) ;
849 if (reg & CFGR_BEM) ;
850 break;
851
852 case MEAR:
853 // Clear writable bits
854 regs.mear &= MEAR_EEDO;
855 // Set appropriate writable bits
856 regs.mear |= reg & ~MEAR_EEDO;
857
858 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
859 // even though it could get it through RFDR
860 if (reg & MEAR_EESEL) {
861 // Rising edge of clock
862 if (reg & MEAR_EECLK && !eepromClk)
863 eepromKick();
864 }
865 else {
866 eepromState = eepromStart;
867 regs.mear &= ~MEAR_EEDI;
868 }
869
870 eepromClk = reg & MEAR_EECLK;
871
872 // since phy is completely faked, MEAR_MD* don't matter
873 if (reg & MEAR_MDIO) ;
874 if (reg & MEAR_MDDIR) ;
875 if (reg & MEAR_MDC) ;
876 break;
877
878 case PTSCR:
879 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // These control BISTs for various parts of the chip. We don't
            // model them, so just fake that each BIST completes immediately.
882 if (reg & PTSCR_RBIST_EN)
883 regs.ptscr |= PTSCR_RBIST_DONE;
884 if (reg & PTSCR_EEBIST_EN)
885 regs.ptscr &= ~PTSCR_EEBIST_EN;
886 if (reg & PTSCR_EELOAD_EN)
887 regs.ptscr &= ~PTSCR_EELOAD_EN;
888 break;
889
          case ISR: /* writes to the ISR have no effect on real hardware;
                       panic so an unexpected driver write is caught */
891 panic("ISR is a read only register!\n");
892
893 case IMR:
894 regs.imr = reg;
895 devIntrChangeMask();
896 break;
897
898 case IER:
899 regs.ier = reg;
900 break;
901
902 case IHR:
903 regs.ihr = reg;
904 /* not going to implement real interrupt holdoff */
905 break;
906
907 case TXDP:
908 regs.txdp = (reg & 0xFFFFFFFC);
909 assert(txState == txIdle);
910 CTDD = false;
911 break;
912
913 case TXDP_HI:
914 regs.txdp_hi = reg;
915 break;
916
917 case TX_CFG:
918 regs.txcfg = reg;
919 #if 0
920 if (reg & TX_CFG_CSI) ;
921 if (reg & TX_CFG_HBI) ;
922 if (reg & TX_CFG_MLB) ;
923 if (reg & TX_CFG_ATP) ;
924 if (reg & TX_CFG_ECRETRY) {
925 /*
926 * this could easily be implemented, but considering
927 * the network is just a fake pipe, wouldn't make
928 * sense to do this
929 */
930 }
931
932 if (reg & TX_CFG_BRST_DIS) ;
933 #endif
934
935 #if 0
936 /* we handle our own DMA, ignore the kernel's exhortations */
937 if (reg & TX_CFG_MXDMA) ;
938 #endif
939
940 // also, we currently don't care about fill/drain
941 // thresholds though this may change in the future with
942 // more realistic networks or a driver which changes it
943 // according to feedback
944
945 break;
946
947 case GPIOR:
948 // Only write writable bits
949 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
950 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
951 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
952 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
953 /* these just control general purpose i/o pins, don't matter */
954 break;
955
956 case RXDP:
957 regs.rxdp = reg;
958 CRDD = false;
959 break;
960
961 case RXDP_HI:
962 regs.rxdp_hi = reg;
963 break;
964
965 case RX_CFG:
966 regs.rxcfg = reg;
967 #if 0
968 if (reg & RX_CFG_AEP) ;
969 if (reg & RX_CFG_ARP) ;
970 if (reg & RX_CFG_STRIPCRC) ;
971 if (reg & RX_CFG_RX_RD) ;
972 if (reg & RX_CFG_ALP) ;
973 if (reg & RX_CFG_AIRL) ;
974
975 /* we handle our own DMA, ignore what kernel says about it */
976 if (reg & RX_CFG_MXDMA) ;
977
978 //also, we currently don't care about fill/drain thresholds
979 //though this may change in the future with more realistic
980 //networks or a driver which changes it according to feedback
981 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
982 #endif
983 break;
984
985 case PQCR:
986 /* there is no priority queueing used in the linux 2.6 driver */
987 regs.pqcr = reg;
988 break;
989
990 case WCSR:
991 /* not going to implement wake on LAN */
992 regs.wcsr = reg;
993 break;
994
995 case PCR:
996 /* not going to implement pause control */
997 regs.pcr = reg;
998 break;
999
1000 case RFCR:
1001 regs.rfcr = reg;
1002
1003 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1004 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1005 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1006 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1007 acceptPerfect = (reg & RFCR_APM) ? true : false;
1008 acceptArp = (reg & RFCR_AARP) ? true : false;
1009 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1010
1011 #if 0
1012 if (reg & RFCR_APAT)
1013 panic("RFCR_APAT not implemented!\n");
1014 #endif
1015 if (reg & RFCR_UHEN)
1016 panic("Unicast hash filtering not used by drivers!\n");
1017
1018 if (reg & RFCR_ULM)
1019 panic("RFCR_ULM not implemented!\n");
1020
1021 break;
1022
1023 case RFDR:
1024 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1025 switch (rfaddr) {
1026 case 0x000:
1027 rom.perfectMatch[0] = (uint8_t)reg;
1028 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1029 break;
1030 case 0x002:
1031 rom.perfectMatch[2] = (uint8_t)reg;
1032 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1033 break;
1034 case 0x004:
1035 rom.perfectMatch[4] = (uint8_t)reg;
1036 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1037 break;
1038 default:
1039
1040 if (rfaddr >= FHASH_ADDR &&
1041 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1042
1043 // Only word-aligned writes supported
1044 if (rfaddr % 2)
1045 panic("unaligned write to filter hash table!");
1046
1047 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1048 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1049 = (uint8_t)(reg >> 8);
1050 break;
1051 }
                panic("writing RFDR for something other than pattern"
                      " matching or hashing! %#x\n", rfaddr);
            }
            break;

1056 case BRAR:
1057 regs.brar = reg;
1058 break;
1059
1060 case BRDR:
1061 panic("the driver never uses BRDR, something is wrong!\n");
1062
1063 case SRR:
1064 panic("SRR is read only register!\n");
1065
1066 case MIBC:
1067 panic("the driver never uses MIBC, something is wrong!\n");
1068
1069 case VRCR:
1070 regs.vrcr = reg;
1071 break;
1072
1073 case VTCR:
1074 regs.vtcr = reg;
1075 break;
1076
1077 case VDR:
1078 panic("the driver never uses VDR, something is wrong!\n");
1079
1080 case CCSR:
1081 /* not going to implement clockrun stuff */
1082 regs.ccsr = reg;
1083 break;
1084
1085 case TBICR:
1086 regs.tbicr = reg;
1087 if (reg & TBICR_MR_LOOPBACK)
1088 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1089
1090 if (reg & TBICR_MR_AN_ENABLE) {
1091 regs.tanlpar = regs.tanar;
1092 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1093 }
1094
1095 #if 0
1096 if (reg & TBICR_MR_RESTART_AN) ;
1097 #endif
1098
1099 break;
1100
1101 case TBISR:
1102 panic("TBISR is read only register!\n");
1103
1104 case TANAR:
1105 // Only write the writable bits
1106 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1107 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1108
1109 // Pause capability unimplemented
1110 #if 0
1111 if (reg & TANAR_PS2) ;
1112 if (reg & TANAR_PS1) ;
1113 #endif
1114
1115 break;
1116
1117 case TANLPAR:
1118 panic("this should only be written to by the fake phy!\n");
1119
1120 case TANER:
1121 panic("TANER is read only register!\n");
1122
1123 case TESR:
1124 regs.tesr = reg;
1125 break;
1126
1127 default:
1128 panic("invalid register access daddr=%#x", daddr);
1129 }
1130 } else {
1131 panic("Invalid Request Size");
1132 }
1133 pkt->result = Packet::Success;
1134 return pioDelay;
1135 }
1136
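/*
 * Interrupt handling is two-level: devIntrPost/devIntrClear manage the bits
 * in the device ISR (counting each cause in the total* stats), while
 * cpuIntrPost/cpuIntrClear manage the single interrupt line to the CPU. The
 * coalesced* stats measure how many ISR causes accumulate per CPU post.
 */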
1137 void
1138 NSGigE::devIntrPost(uint32_t interrupts)
1139 {
1140 if (interrupts & ISR_RESERVE)
1141 panic("Cannot set a reserved interrupt");
1142
1143 if (interrupts & ISR_NOIMPL)
1144 warn("interrupt not implemented %#x\n", interrupts);
1145
1146 interrupts &= ISR_IMPL;
1147 regs.isr |= interrupts;
1148
1149 if (interrupts & regs.imr) {
1150 if (interrupts & ISR_SWI) {
1151 totalSwi++;
1152 }
1153 if (interrupts & ISR_RXIDLE) {
1154 totalRxIdle++;
1155 }
1156 if (interrupts & ISR_RXOK) {
1157 totalRxOk++;
1158 }
1159 if (interrupts & ISR_RXDESC) {
1160 totalRxDesc++;
1161 }
1162 if (interrupts & ISR_TXOK) {
1163 totalTxOk++;
1164 }
1165 if (interrupts & ISR_TXIDLE) {
1166 totalTxIdle++;
1167 }
1168 if (interrupts & ISR_TXDESC) {
1169 totalTxDesc++;
1170 }
1171 if (interrupts & ISR_RXORN) {
1172 totalRxOrn++;
1173 }
1174 }
1175
1176 DPRINTF(EthernetIntr,
1177 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1178 interrupts, regs.isr, regs.imr);
1179
1180 if ((regs.isr & regs.imr)) {
1181 Tick when = curTick;
1182 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
1183 when += intrDelay;
1184 cpuIntrPost(when);
1185 }
1186 }
1187
/* Putting the interrupt-counting stats inside this function means that it is
   now limited to being used to clear all interrupts when the kernel reads and
   services the ISR. Keep that in mind if you are thinking of expanding its
   use.
*/
1193 void
1194 NSGigE::devIntrClear(uint32_t interrupts)
1195 {
1196 if (interrupts & ISR_RESERVE)
1197 panic("Cannot clear a reserved interrupt");
1198
1199 if (regs.isr & regs.imr & ISR_SWI) {
1200 postedSwi++;
1201 }
1202 if (regs.isr & regs.imr & ISR_RXIDLE) {
1203 postedRxIdle++;
1204 }
1205 if (regs.isr & regs.imr & ISR_RXOK) {
1206 postedRxOk++;
1207 }
1208 if (regs.isr & regs.imr & ISR_RXDESC) {
1209 postedRxDesc++;
1210 }
1211 if (regs.isr & regs.imr & ISR_TXOK) {
1212 postedTxOk++;
1213 }
1214 if (regs.isr & regs.imr & ISR_TXIDLE) {
1215 postedTxIdle++;
1216 }
1217 if (regs.isr & regs.imr & ISR_TXDESC) {
1218 postedTxDesc++;
1219 }
1220 if (regs.isr & regs.imr & ISR_RXORN) {
1221 postedRxOrn++;
1222 }
1223
1224 if (regs.isr & regs.imr & ISR_IMPL)
1225 postedInterrupts++;
1226
1227 interrupts &= ~ISR_NOIMPL;
1228 regs.isr &= ~interrupts;
1229
1230 DPRINTF(EthernetIntr,
1231 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1232 interrupts, regs.isr, regs.imr);
1233
1234 if (!(regs.isr & regs.imr))
1235 cpuIntrClear();
1236 }
1237
1238 void
1239 NSGigE::devIntrChangeMask()
1240 {
1241 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1242 regs.isr, regs.imr, regs.isr & regs.imr);
1243
1244 if (regs.isr & regs.imr)
1245 cpuIntrPost(curTick);
1246 else
1247 cpuIntrClear();
1248 }
1249
1250 void
1251 NSGigE::cpuIntrPost(Tick when)
1252 {
1253 // If the interrupt you want to post is later than an interrupt
1254 // already scheduled, just let it post in the coming one and don't
1255 // schedule another.
1256 // HOWEVER, must be sure that the scheduled intrTick is in the
1257 // future (this was formerly the source of a bug)
1258 /**
1259 * @todo this warning should be removed and the intrTick code should
1260 * be fixed.
1261 */
1262 assert(when >= curTick);
1263 assert(intrTick >= curTick || intrTick == 0);
1264 if (when > intrTick && intrTick != 0) {
1265 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
1266 intrTick);
1267 return;
1268 }
1269
1270 intrTick = when;
1271 if (intrTick < curTick) {
1272 debug_break();
1273 intrTick = curTick;
1274 }
1275
1276 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
1277 intrTick);
1278
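    // Only one interrupt event is kept in flight: squash any previously
    // scheduled event and schedule a fresh one at the (possibly earlier)
    // intrTick.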
1279 if (intrEvent)
1280 intrEvent->squash();
1281 intrEvent = new IntrEvent(this, true);
1282 intrEvent->schedule(intrTick);
1283 }
1284
1285 void
1286 NSGigE::cpuInterrupt()
1287 {
1288 assert(intrTick == curTick);
1289
1290 // Whether or not there's a pending interrupt, we don't care about
1291 // it anymore
1292 intrEvent = 0;
1293 intrTick = 0;
1294
1295 // Don't send an interrupt if there's already one
1296 if (cpuPendingIntr) {
1297 DPRINTF(EthernetIntr,
1298 "would send an interrupt now, but there's already pending\n");
1299 } else {
1300 // Send interrupt
1301 cpuPendingIntr = true;
1302
1303 DPRINTF(EthernetIntr, "posting interrupt\n");
1304 intrPost();
1305 }
1306 }
1307
1308 void
1309 NSGigE::cpuIntrClear()
1310 {
1311 if (!cpuPendingIntr)
1312 return;
1313
1314 if (intrEvent) {
1315 intrEvent->squash();
1316 intrEvent = 0;
1317 }
1318
1319 intrTick = 0;
1320
1321 cpuPendingIntr = false;
1322
1323 DPRINTF(EthernetIntr, "clearing interrupt\n");
1324 intrClear();
1325 }
1326
1327 bool
1328 NSGigE::cpuIntrPending() const
1329 { return cpuPendingIntr; }
1330
1331 void
1332 NSGigE::txReset()
1333 {
1334
1335 DPRINTF(Ethernet, "transmit reset\n");
1336
1337 CTDD = false;
    txEnable = false;
1339 txFragPtr = 0;
1340 assert(txDescCnt == 0);
1341 txFifo.clear();
1342 txState = txIdle;
1343 assert(txDmaState == dmaIdle);
1344 }
1345
1346 void
1347 NSGigE::rxReset()
1348 {
1349 DPRINTF(Ethernet, "receive reset\n");
1350
1351 CRDD = false;
1352 assert(rxPktBytes == 0);
1353 rxEnable = false;
1354 rxFragPtr = 0;
1355 assert(rxDescCnt == 0);
1356 assert(rxDmaState == dmaIdle);
1357 rxFifo.clear();
1358 rxState = rxIdle;
1359 }
1360
1361 void
1362 NSGigE::regsReset()
1363 {
1364 memset(&regs, 0, sizeof(regs));
1365 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1366 regs.mear = 0x12;
1367 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1368 // fill threshold to 32 bytes
1369 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1370 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1371 regs.mibc = MIBC_FRZ;
1372 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1373 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1374 regs.brar = 0xffffffff;
1375
1376 extstsEnable = false;
1377 acceptBroadcast = false;
1378 acceptMulticast = false;
1379 acceptUnicast = false;
1380 acceptPerfect = false;
1381 acceptArp = false;
1382 }
1383
1384 bool
1385 NSGigE::doRxDmaRead()
1386 {
1387 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1388 rxDmaState = dmaReading;
1389
1390 if (dmaPending())
1391 rxDmaState = dmaReadWaiting;
1392 else
1393 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1394
1395 return true;
1396 }
1397
1398 void
1399 NSGigE::rxDmaReadDone()
1400 {
1401 assert(rxDmaState == dmaReading);
1402 rxDmaState = dmaIdle;
1403
1404 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1405 rxDmaAddr, rxDmaLen);
1406 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1407
1408 // If the transmit state machine has a pending DMA, let it go first
1409 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1410 txKick();
1411
1412 rxKick();
1413 }
1414
1415 bool
1416 NSGigE::doRxDmaWrite()
1417 {
1418 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1419 rxDmaState = dmaWriting;
1420
1421 if (dmaPending())
1422 rxDmaState = dmaWriteWaiting;
1423 else
1424 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1425 return true;
1426 }
1427
1428 void
1429 NSGigE::rxDmaWriteDone()
1430 {
1431 assert(rxDmaState == dmaWriting);
1432 rxDmaState = dmaIdle;
1433
1434 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1435 rxDmaAddr, rxDmaLen);
1436 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1437
1438 // If the transmit state machine has a pending DMA, let it go first
1439 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1440 txKick();
1441
1442 rxKick();
1443 }
1444
1445 void
1446 NSGigE::rxKick()
1447 {
1448 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1449
1450 DPRINTF(EthernetSM,
1451 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1452 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1453
1454 Addr link, bufptr;
1455 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1456 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1457
1458 next:
1459 if (clock) {
1460 if (rxKickTick > curTick) {
1461 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1462 rxKickTick);
1463
1464 goto exit;
1465 }
1466
1467 // Go to the next state machine clock tick.
1468 rxKickTick = curTick + cycles(1);
1469 }
1470
1471 switch(rxDmaState) {
1472 case dmaReadWaiting:
1473 if (doRxDmaRead())
1474 goto exit;
1475 break;
1476 case dmaWriteWaiting:
1477 if (doRxDmaWrite())
1478 goto exit;
1479 break;
1480 default:
1481 break;
1482 }
1483
1484 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1485 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1486
    // See the state machine in the spec sheet for details. The way this
    // works: when the work for one state is finished and we can go directly
    // to another, we jump to the label "next". If there is intermediate
    // work, such as a DMA, that keeps us from moving to the next state yet,
    // we jump to "exit" and leave the loop; when the DMA completes, it
    // triggers an event that brings us back into this loop.
1494 switch (rxState) {
1495 case rxIdle:
1496 if (!rxEnable) {
1497 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1498 goto exit;
1499 }
1500
1501 if (CRDD) {
1502 rxState = rxDescRefr;
1503
1504 rxDmaAddr = regs.rxdp & 0x3fffffff;
1505 rxDmaData =
1506 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1507 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1508 rxDmaFree = dmaDescFree;
1509
1510 descDmaReads++;
1511 descDmaRdBytes += rxDmaLen;
1512
1513 if (doRxDmaRead())
1514 goto exit;
1515 } else {
1516 rxState = rxDescRead;
1517
1518 rxDmaAddr = regs.rxdp & 0x3fffffff;
1519 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1520 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1521 rxDmaFree = dmaDescFree;
1522
1523 descDmaReads++;
1524 descDmaRdBytes += rxDmaLen;
1525
1526 if (doRxDmaRead())
1527 goto exit;
1528 }
1529 break;
1530
1531 case rxDescRefr:
1532 if (rxDmaState != dmaIdle)
1533 goto exit;
1534
1535 rxState = rxAdvance;
1536 break;
1537
1538 case rxDescRead:
1539 if (rxDmaState != dmaIdle)
1540 goto exit;
1541
1542 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1543 regs.rxdp & 0x3fffffff);
1544 DPRINTF(EthernetDesc,
1545 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1546 link, bufptr, cmdsts, extsts);
1547
1548 if (cmdsts & CMDSTS_OWN) {
1549 devIntrPost(ISR_RXIDLE);
1550 rxState = rxIdle;
1551 goto exit;
1552 } else {
1553 rxState = rxFifoBlock;
1554 rxFragPtr = bufptr;
1555 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1556 }
1557 break;
1558
1559 case rxFifoBlock:
1560 if (!rxPacket) {
1561 /**
1562 * @todo in reality, we should be able to start processing
1563 * the packet as it arrives, and not have to wait for the
             * full packet to be in the receive fifo.
1565 */
1566 if (rxFifo.empty())
1567 goto exit;
1568
1569 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1570
1571 // If we don't have a packet, grab a new one from the fifo.
1572 rxPacket = rxFifo.front();
1573 rxPktBytes = rxPacket->length;
1574 rxPacketBufPtr = rxPacket->data;
1575
1576 #if TRACING_ON
1577 if (DTRACE(Ethernet)) {
1578 IpPtr ip(rxPacket);
1579 if (ip) {
1580 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1581 TcpPtr tcp(ip);
1582 if (tcp) {
1583 DPRINTF(Ethernet,
1584 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1585 tcp->sport(), tcp->dport(), tcp->seq(),
1586 tcp->ack());
1587 }
1588 }
1589 }
1590 #endif
1591
1592 // sanity check - i think the driver behaves like this
1593 assert(rxDescCnt >= rxPktBytes);
1594 rxFifo.pop();
1595 }
1596
1597
        // don't need the && rxDescCnt > 0 if the driver sanity check
        // above holds
1600 if (rxPktBytes > 0) {
1601 rxState = rxFragWrite;
1602 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1603 // check holds
1604 rxXferLen = rxPktBytes;
1605
1606 rxDmaAddr = rxFragPtr & 0x3fffffff;
1607 rxDmaData = rxPacketBufPtr;
1608 rxDmaLen = rxXferLen;
1609 rxDmaFree = dmaDataFree;
1610
1611 if (doRxDmaWrite())
1612 goto exit;
1613
1614 } else {
1615 rxState = rxDescWrite;
1616
1617 //if (rxPktBytes == 0) { /* packet is done */
1618 assert(rxPktBytes == 0);
1619 DPRINTF(EthernetSM, "done with receiving packet\n");
1620
1621 cmdsts |= CMDSTS_OWN;
1622 cmdsts &= ~CMDSTS_MORE;
1623 cmdsts |= CMDSTS_OK;
1624 cmdsts &= 0xffff0000;
1625 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1626
1627 #if 0
            /*
             * All the driver uses these for is its own stats keeping, which
             * we don't care about; they aren't necessary for functionality,
             * and computing them would just slow us down. If a later driver
             * version uses them for functional purposes, re-enable this
             * block.
             */
1635 if (rxFilterEnable) {
1636 cmdsts &= ~CMDSTS_DEST_MASK;
1637 const EthAddr &dst = rxFifoFront()->dst();
1638 if (dst->unicast())
1639 cmdsts |= CMDSTS_DEST_SELF;
1640 if (dst->multicast())
1641 cmdsts |= CMDSTS_DEST_MULTI;
1642 if (dst->broadcast())
1643 cmdsts |= CMDSTS_DEST_MASK;
1644 }
1645 #endif
1646
1647 IpPtr ip(rxPacket);
1648 if (extstsEnable && ip) {
1649 extsts |= EXTSTS_IPPKT;
1650 rxIpChecksums++;
1651 if (cksum(ip) != 0) {
1652 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1653 extsts |= EXTSTS_IPERR;
1654 }
1655 TcpPtr tcp(ip);
1656 UdpPtr udp(ip);
1657 if (tcp) {
1658 extsts |= EXTSTS_TCPPKT;
1659 rxTcpChecksums++;
1660 if (cksum(tcp) != 0) {
1661 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1662 extsts |= EXTSTS_TCPERR;
1663
1664 }
1665 } else if (udp) {
1666 extsts |= EXTSTS_UDPPKT;
1667 rxUdpChecksums++;
1668 if (cksum(udp) != 0) {
1669 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1670 extsts |= EXTSTS_UDPERR;
1671 }
1672 }
1673 }
1674 rxPacket = 0;
1675
            /*
             * The driver seems to always receive into descriptor buffers of
             * size 1514, so a packet is never split across multiple
             * descriptors on the receive side. That case isn't implemented,
             * hence the assert above.
             */
1682
1683 DPRINTF(EthernetDesc,
1684 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1685 regs.rxdp & 0x3fffffff);
1686 DPRINTF(EthernetDesc,
1687 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1688 link, bufptr, cmdsts, extsts);
1689
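            // Write back only the cmdsts and extsts words of the descriptor.
            // This relies on extsts immediately following cmdsts in both the
            // ns_desc32 and ns_desc64 layouts, so a single DMA of their
            // combined size starting at offsetof(cmdsts) covers both fields.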
1690 rxDmaAddr = regs.rxdp & 0x3fffffff;
1691 rxDmaData = &cmdsts;
1692 if (is64bit) {
1693 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1694 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1695 } else {
1696 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1697 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1698 }
1699 rxDmaFree = dmaDescFree;
1700
1701 descDmaWrites++;
1702 descDmaWrBytes += rxDmaLen;
1703
1704 if (doRxDmaWrite())
1705 goto exit;
1706 }
1707 break;
1708
1709 case rxFragWrite:
1710 if (rxDmaState != dmaIdle)
1711 goto exit;
1712
1713 rxPacketBufPtr += rxXferLen;
1714 rxFragPtr += rxXferLen;
1715 rxPktBytes -= rxXferLen;
1716
1717 rxState = rxFifoBlock;
1718 break;
1719
1720 case rxDescWrite:
1721 if (rxDmaState != dmaIdle)
1722 goto exit;
1723
1724 assert(cmdsts & CMDSTS_OWN);
1725
1726 assert(rxPacket == 0);
1727 devIntrPost(ISR_RXOK);
1728
1729 if (cmdsts & CMDSTS_INTR)
1730 devIntrPost(ISR_RXDESC);
1731
1732 if (!rxEnable) {
1733 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1734 rxState = rxIdle;
1735 goto exit;
1736 } else
1737 rxState = rxAdvance;
1738 break;
1739
1740 case rxAdvance:
1741 if (link == 0) {
1742 devIntrPost(ISR_RXIDLE);
1743 rxState = rxIdle;
1744 CRDD = true;
1745 goto exit;
1746 } else {
1747 if (rxDmaState != dmaIdle)
1748 goto exit;
1749 rxState = rxDescRead;
1750 regs.rxdp = link;
1751 CRDD = false;
1752
1753 rxDmaAddr = regs.rxdp & 0x3fffffff;
1754 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1755 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1756 rxDmaFree = dmaDescFree;
1757
1758 if (doRxDmaRead())
1759 goto exit;
1760 }
1761 break;
1762
1763 default:
1764 panic("Invalid rxState!");
1765 }
1766
1767 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1768 NsRxStateStrings[rxState]);
1769 goto next;
1770
1771 exit:
1772 /**
1773 * @todo do we want to schedule a future kick?
1774 */
1775 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1776 NsRxStateStrings[rxState]);
1777
1778 if (clock && !rxKickEvent.scheduled())
1779 rxKickEvent.schedule(rxKickTick);
1780 }
1781
1782 void
1783 NSGigE::transmit()
1784 {
1785 if (txFifo.empty()) {
1786 DPRINTF(Ethernet, "nothing to transmit\n");
1787 return;
1788 }
1789
1790 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1791 txFifo.size());
1792 if (interface->sendPacket(txFifo.front())) {
1793 #if TRACING_ON
1794 if (DTRACE(Ethernet)) {
1795 IpPtr ip(txFifo.front());
1796 if (ip) {
1797 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1798 TcpPtr tcp(ip);
1799 if (tcp) {
1800 DPRINTF(Ethernet,
1801 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1802 tcp->sport(), tcp->dport(), tcp->seq(),
1803 tcp->ack());
1804 }
1805 }
1806 }
1807 #endif
1808
1809 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1810 txBytes += txFifo.front()->length;
1811 txPackets++;
1812
1813 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1814 txFifo.avail());
1815 txFifo.pop();
1816
        /*
         * Normally the descriptor writeback would happen here, and ONLY
         * after it completes would this interrupt be sent. But since
         * transmission never actually fails in the simulator, just post the
         * interrupt here; otherwise the code has to stray from this nice
         * format. Besides, it's functionally the same.
         */
1824 devIntrPost(ISR_TXOK);
1825 }
1826
1827 if (!txFifo.empty() && !txEvent.scheduled()) {
1828 DPRINTF(Ethernet, "reschedule transmit\n");
1829 txEvent.schedule(curTick + retryTime);
1830 }
1831 }
1832
1833 bool
1834 NSGigE::doTxDmaRead()
1835 {
1836 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1837 txDmaState = dmaReading;
1838
1839 if (dmaPending())
1840 txDmaState = dmaReadWaiting;
1841 else
1842 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1843
1844 return true;
1845 }
1846
1847 void
1848 NSGigE::txDmaReadDone()
1849 {
1850 assert(txDmaState == dmaReading);
1851 txDmaState = dmaIdle;
1852
1853 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1854 txDmaAddr, txDmaLen);
1855 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1856
1857 // If the receive state machine has a pending DMA, let it go first
1858 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1859 rxKick();
1860
1861 txKick();
1862 }
1863
1864 bool
1865 NSGigE::doTxDmaWrite()
1866 {
1867 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1868 txDmaState = dmaWriting;
1869
1870 if (dmaPending())
1871 txDmaState = dmaWriteWaiting;
1872 else
1873 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1874 return true;
1875 }
1876
1877 void
1878 NSGigE::txDmaWriteDone()
1879 {
1880 assert(txDmaState == dmaWriting);
1881 txDmaState = dmaIdle;
1882
1883 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1884 txDmaAddr, txDmaLen);
1885 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1886
1887 // If the receive state machine has a pending DMA, let it go first
1888 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1889 rxKick();
1890
1891 txKick();
1892 }
1893
1894 void
1895 NSGigE::txKick()
1896 {
1897 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1898
1899 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1900 NsTxStateStrings[txState], is64bit ? 64 : 32);
1901
1902 Addr link, bufptr;
1903 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1904 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1905
1906 next:
1907 if (clock) {
1908 if (txKickTick > curTick) {
1909 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1910 txKickTick);
1911 goto exit;
1912 }
1913
1914 // Go to the next state machine clock tick.
1915 txKickTick = curTick + cycles(1);
1916 }
1917
1918 switch(txDmaState) {
1919 case dmaReadWaiting:
1920 if (doTxDmaRead())
1921 goto exit;
1922 break;
1923 case dmaWriteWaiting:
1924 if (doTxDmaWrite())
1925 goto exit;
1926 break;
1927 default:
1928 break;
1929 }
1930
1931 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1932 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1933 switch (txState) {
1934 case txIdle:
1935 if (!txEnable) {
1936 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1937 goto exit;
1938 }
1939
1940 if (CTDD) {
1941 txState = txDescRefr;
1942
1943 txDmaAddr = regs.txdp & 0x3fffffff;
1944 txDmaData =
1945 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1946 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1947 txDmaFree = dmaDescFree;
1948
1949 descDmaReads++;
1950 descDmaRdBytes += txDmaLen;
1951
1952 if (doTxDmaRead())
1953 goto exit;
1954
1955 } else {
1956 txState = txDescRead;
1957
1958 txDmaAddr = regs.txdp & 0x3fffffff;
1959 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1960 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1961 txDmaFree = dmaDescFree;
1962
1963 descDmaReads++;
1964 descDmaRdBytes += txDmaLen;
1965
1966 if (doTxDmaRead())
1967 goto exit;
1968 }
1969 break;
1970
1971 case txDescRefr:
1972 if (txDmaState != dmaIdle)
1973 goto exit;
1974
1975 txState = txAdvance;
1976 break;
1977
1978 case txDescRead:
1979 if (txDmaState != dmaIdle)
1980 goto exit;
1981
1982 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1983 regs.txdp & 0x3fffffff);
1984 DPRINTF(EthernetDesc,
1985 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1986 link, bufptr, cmdsts, extsts);
1987
1988 if (cmdsts & CMDSTS_OWN) {
1989 txState = txFifoBlock;
1990 txFragPtr = bufptr;
1991 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1992 } else {
1993 devIntrPost(ISR_TXIDLE);
1994 txState = txIdle;
1995 goto exit;
1996 }
1997 break;
1998
1999 case txFifoBlock:
2000 if (!txPacket) {
2001 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2002 txPacket = new EthPacketData(16384);
2003 txPacketBufPtr = txPacket->data;
2004 }
2005
2006 if (txDescCnt == 0) {
2007 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2008 if (cmdsts & CMDSTS_MORE) {
2009 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2010 txState = txDescWrite;
2011
2012 cmdsts &= ~CMDSTS_OWN;
2013
2014 txDmaAddr = regs.txdp & 0x3fffffff;
2015 txDmaData = &cmdsts;
2016 if (is64bit) {
2017 txDmaAddr += offsetof(ns_desc64, cmdsts);
2018 txDmaLen = sizeof(txDesc64.cmdsts);
2019 } else {
2020 txDmaAddr += offsetof(ns_desc32, cmdsts);
2021 txDmaLen = sizeof(txDesc32.cmdsts);
2022 }
2023 txDmaFree = dmaDescFree;
2024
2025 if (doTxDmaWrite())
2026 goto exit;
2027
2028 } else { /* this packet is totally done */
2029 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the packet that just finished */
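                // Checksum offload: if the driver requested per-packet
                // checksumming (VTCR_PPCHK) with extended status enabled,
                // the device fills in the IP/TCP/UDP checksums itself before
                // the packet is handed to the FIFO.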
2031 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2032 IpPtr ip(txPacket);
2033 if (extsts & EXTSTS_UDPPKT) {
2034 UdpPtr udp(ip);
2035 udp->sum(0);
2036 udp->sum(cksum(udp));
2037 txUdpChecksums++;
2038 } else if (extsts & EXTSTS_TCPPKT) {
2039 TcpPtr tcp(ip);
2040 tcp->sum(0);
2041 tcp->sum(cksum(tcp));
2042 txTcpChecksums++;
2043 }
2044 if (extsts & EXTSTS_IPPKT) {
2045 ip->sum(0);
2046 ip->sum(cksum(ip));
2047 txIpChecksums++;
2048 }
2049 }
2050
2051 txPacket->length = txPacketBufPtr - txPacket->data;
                    // This is just because the receive side can't handle a
                    // packet any bigger than 1514 bytes; make sure we never
                    // send one.
                    if (txPacket->length > 1514)
                        panic("transmit packet too large, %d > 1514\n",
                              txPacket->length);
2057
2058 #ifndef NDEBUG
2059 bool success =
2060 #endif
2061 txFifo.push(txPacket);
2062 assert(success);
2063
                    /*
                     * The following section is not to spec, but functionally
                     * shouldn't be any different. Normally the chip waits
                     * until the transmit has occurred before writing back
                     * the descriptor, because it has to see whether the
                     * packet was successfully transmitted to decide whether
                     * to set CMDSTS_OK. However, in the simulator the
                     * transmit always succeeds, and writing this exactly to
                     * spec would complicate the code, so we just do it here.
                     */
2075
2076 cmdsts &= ~CMDSTS_OWN;
2077 cmdsts |= CMDSTS_OK;
2078
2079 DPRINTF(EthernetDesc,
2080 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2081 cmdsts, extsts);
2082
2083 txDmaFree = dmaDescFree;
2084 txDmaAddr = regs.txdp & 0x3fffffff;
2085 txDmaData = &cmdsts;
2086 if (is64bit) {
2087 txDmaAddr += offsetof(ns_desc64, cmdsts);
2088 txDmaLen =
2089 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2090 } else {
2091 txDmaAddr += offsetof(ns_desc32, cmdsts);
2092 txDmaLen =
2093 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2094 }
2095
2096 descDmaWrites++;
2097 descDmaWrBytes += txDmaLen;
2098
2099 transmit();
2100 txPacket = 0;
2101
2102 if (!txEnable) {
2103 DPRINTF(EthernetSM, "halting TX state machine\n");
2104 txState = txIdle;
2105 goto exit;
2106 } else
2107 txState = txAdvance;
2108
2109 if (doTxDmaWrite())
2110 goto exit;
2111 }
2112 } else {
2113 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2114 if (!txFifo.full()) {
2115 txState = txFragRead;
2116
2117 /*
2118 * The number of bytes transferred is either whatever
2119 * is left in the descriptor (txDescCnt), or if there
2120 * is not enough room in the fifo, just whatever room
2121 * is left in the fifo
2122 */
2123 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2124
2125 txDmaAddr = txFragPtr & 0x3fffffff;
2126 txDmaData = txPacketBufPtr;
2127 txDmaLen = txXferLen;
2128 txDmaFree = dmaDataFree;
2129
2130 if (doTxDmaRead())
2131 goto exit;
2132 } else {
2133 txState = txFifoBlock;
2134 transmit();
2135
2136 goto exit;
2137 }
2138
2139 }
2140 break;
2141
2142 case txFragRead:
2143 if (txDmaState != dmaIdle)
2144 goto exit;
2145
2146 txPacketBufPtr += txXferLen;
2147 txFragPtr += txXferLen;
2148 txDescCnt -= txXferLen;
2149 txFifo.reserve(txXferLen);
2150
2151 txState = txFifoBlock;
2152 break;
2153
2154 case txDescWrite:
2155 if (txDmaState != dmaIdle)
2156 goto exit;
2157
2158 if (cmdsts & CMDSTS_INTR)
2159 devIntrPost(ISR_TXDESC);
2160
2161 if (!txEnable) {
2162 DPRINTF(EthernetSM, "halting TX state machine\n");
2163 txState = txIdle;
2164 goto exit;
2165 } else
2166 txState = txAdvance;
2167 break;
2168
2169 case txAdvance:
2170 if (link == 0) {
2171 devIntrPost(ISR_TXIDLE);
2172 txState = txIdle;
2173 goto exit;
2174 } else {
2175 if (txDmaState != dmaIdle)
2176 goto exit;
2177 txState = txDescRead;
2178 regs.txdp = link;
2179 CTDD = false;
2180
2181 txDmaAddr = link & 0x3fffffff;
2182 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2183 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2184 txDmaFree = dmaDescFree;
2185
2186 if (doTxDmaRead())
2187 goto exit;
2188 }
2189 break;
2190
2191 default:
2192 panic("invalid state");
2193 }
2194
2195 DPRINTF(EthernetSM, "entering next txState=%s\n",
2196 NsTxStateStrings[txState]);
2197 goto next;
2198
2199 exit:
2200 /**
2201 * @todo do we want to schedule a future kick?
2202 */
2203 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2204 NsTxStateStrings[txState]);
2205
2206 if (clock && !txKickEvent.scheduled())
2207 txKickEvent.schedule(txKickTick);
2208 }
2209
2210 /**
2211 * Advance the EEPROM state machine
2212 * Called on rising edge of EEPROM clock bit in MEAR
2213 */
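// A rough sketch of the serial protocol this state machine implements, as
// bit-banged by the driver through MEAR: a start bit, then 2 opcode bits
// (only EEPROM_READ is supported), then 6 address bits are shifted in on
// EEDI, after which 16 data bits are shifted out MSB-first on EEDO, one bit
// per rising edge of EECLK.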
2214 void
2215 NSGigE::eepromKick()
2216 {
2217 switch (eepromState) {
2218
2219 case eepromStart:
2220
2221 // Wait for start bit
2222 if (regs.mear & MEAR_EEDI) {
2223 // Set up to get 2 opcode bits
2224 eepromState = eepromGetOpcode;
2225 eepromBitsToRx = 2;
2226 eepromOpcode = 0;
2227 }
2228 break;
2229
2230 case eepromGetOpcode:
2231 eepromOpcode <<= 1;
2232 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
2233 --eepromBitsToRx;
2234
2235 // Done getting opcode
2236 if (eepromBitsToRx == 0) {
2237 if (eepromOpcode != EEPROM_READ)
2238 panic("only EEPROM reads are implemented!");
2239
2240 // Set up to get address
2241 eepromState = eepromGetAddress;
2242 eepromBitsToRx = 6;
2243 eepromAddress = 0;
2244 }
2245 break;
2246
2247 case eepromGetAddress:
2248 eepromAddress <<= 1;
2249 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
2250 --eepromBitsToRx;
2251
2252 // Done getting address
2253 if (eepromBitsToRx == 0) {
2254
2255 if (eepromAddress >= EEPROM_SIZE)
2256 panic("EEPROM read access out of range!");
2257
2258 switch (eepromAddress) {
2259
2260 case EEPROM_PMATCH2_ADDR:
2261 eepromData = rom.perfectMatch[5];
2262 eepromData <<= 8;
2263 eepromData += rom.perfectMatch[4];
2264 break;
2265
2266 case EEPROM_PMATCH1_ADDR:
2267 eepromData = rom.perfectMatch[3];
2268 eepromData <<= 8;
2269 eepromData += rom.perfectMatch[2];
2270 break;
2271
2272 case EEPROM_PMATCH0_ADDR:
2273 eepromData = rom.perfectMatch[1];
2274 eepromData <<= 8;
2275 eepromData += rom.perfectMatch[0];
2276 break;
2277
2278 default:
2279 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2280 }
2281 // Set up to read data
2282 eepromState = eepromRead;
2283 eepromBitsToRx = 16;
2284
2285 // Clear data in bit
2286 regs.mear &= ~MEAR_EEDI;
2287 }
2288 break;
2289
2290 case eepromRead:
2291 // Clear Data Out bit
2292 regs.mear &= ~MEAR_EEDO;
2293         // Drive EEDO with the current data bit; data is shifted out MSB first
2294 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
2295
2296 eepromData <<= 1;
2297 --eepromBitsToRx;
2298
2299 // All done
2300 if (eepromBitsToRx == 0) {
2301 eepromState = eepromStart;
2302 }
2303 break;
2304
2305 default:
2306 panic("invalid EEPROM state");
2307 }
2308
2309 }
2310
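 /**
  * Called when a transfer completes: if data remains in the txFifo,
  * (re)schedule a transmit attempt for the next cycle.
  */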
2311 void
2312 NSGigE::transferDone()
2313 {
2314 if (txFifo.empty()) {
2315 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2316 return;
2317 }
2318
2319 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2320
2321 if (txEvent.scheduled())
2322 txEvent.reschedule(curTick + cycles(1));
2323 else
2324 txEvent.schedule(curTick + cycles(1));
2325 }
2326
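 /**
  * Apply the receive address filter to an incoming packet.
  * Returns true if the packet should be dropped.
  */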
2327 bool
2328 NSGigE::rxFilter(const EthPacketPtr &packet)
2329 {
2330 EthPtr eth = packet;
2331 bool drop = true;
2332 string type;
2333
2334 const EthAddr &dst = eth->dst();
2335 if (dst.unicast()) {
2336 // If we're accepting all unicast addresses
2337 if (acceptUnicast)
2338 drop = false;
2339
2340 // If we make a perfect match
2341 if (acceptPerfect && dst == rom.perfectMatch)
2342 drop = false;
2343
2344 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2345 drop = false;
2346
2347 } else if (dst.broadcast()) {
2348 // if we're accepting broadcasts
2349 if (acceptBroadcast)
2350 drop = false;
2351
2352 } else if (dst.multicast()) {
2353 // if we're accepting all multicasts
2354 if (acceptMulticast)
2355 drop = false;
2356
2357         // Multicast hash filtering is not modelled: accept all multicasts
2358 if (multicastHashEnable)
2359 drop = false;
2360 }
2361
2362 if (drop) {
2363 DPRINTF(Ethernet, "rxFilter drop\n");
2364 DDUMP(EthernetData, packet->data, packet->length);
2365 }
2366
2367 return drop;
2368 }
2369
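 /**
  * Accept a packet from the wire. Returns false only when the rxFifo
  * has no room for the packet (an overrun); packets discarded because
  * receive is disabled or because they fail the filter still return
  * true, since the device has consumed them.
  */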
2370 bool
2371 NSGigE::recvPacket(EthPacketPtr packet)
2372 {
2373 rxBytes += packet->length;
2374 rxPackets++;
2375
2376 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2377 rxFifo.avail());
2378
2379 if (!rxEnable) {
2380 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2381 return true;
2382 }
2383
2384 if (!rxFilterEnable) {
2385 DPRINTF(Ethernet,
2386 "receive packet filtering disabled . . . packet dropped\n");
2387 return true;
2388 }
2389
2390 if (rxFilter(packet)) {
2391 DPRINTF(Ethernet, "packet filtered...dropped\n");
2392 return true;
2393 }
2394
2395 if (rxFifo.avail() < packet->length) {
2396 #if TRACING_ON
2397 IpPtr ip(packet);
2398 TcpPtr tcp(ip);
2399 if (ip) {
2400 DPRINTF(Ethernet,
2401 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2402 ip->id());
2403 if (tcp) {
2404 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2405 }
2406 }
2407 #endif
2408 droppedPackets++;
2409 devIntrPost(ISR_RXORN);
2410 return false;
2411 }
2412
2413 rxFifo.push(packet);
2414
2415 rxKick();
2416 return true;
2417 }
2418
2419 //=====================================================================
2420 // Checkpoint serialization / unserialization
2421 //
2422 void
2423 NSGigE::serialize(ostream &os)
2424 {
2425 // Serialize the PciDev base class
2426 PciDev::serialize(os);
2427
2428 /*
2429 * Finalize any DMA events now.
2430 */
2431 // @todo will mem system save pending dma?
2432
2433 /*
2434 * Serialize the device registers
2435 */
2436 SERIALIZE_SCALAR(regs.command);
2437 SERIALIZE_SCALAR(regs.config);
2438 SERIALIZE_SCALAR(regs.mear);
2439 SERIALIZE_SCALAR(regs.ptscr);
2440 SERIALIZE_SCALAR(regs.isr);
2441 SERIALIZE_SCALAR(regs.imr);
2442 SERIALIZE_SCALAR(regs.ier);
2443 SERIALIZE_SCALAR(regs.ihr);
2444 SERIALIZE_SCALAR(regs.txdp);
2445 SERIALIZE_SCALAR(regs.txdp_hi);
2446 SERIALIZE_SCALAR(regs.txcfg);
2447 SERIALIZE_SCALAR(regs.gpior);
2448 SERIALIZE_SCALAR(regs.rxdp);
2449 SERIALIZE_SCALAR(regs.rxdp_hi);
2450 SERIALIZE_SCALAR(regs.rxcfg);
2451 SERIALIZE_SCALAR(regs.pqcr);
2452 SERIALIZE_SCALAR(regs.wcsr);
2453 SERIALIZE_SCALAR(regs.pcr);
2454 SERIALIZE_SCALAR(regs.rfcr);
2455 SERIALIZE_SCALAR(regs.rfdr);
2456 SERIALIZE_SCALAR(regs.brar);
2457 SERIALIZE_SCALAR(regs.brdr);
2458 SERIALIZE_SCALAR(regs.srr);
2459 SERIALIZE_SCALAR(regs.mibc);
2460 SERIALIZE_SCALAR(regs.vrcr);
2461 SERIALIZE_SCALAR(regs.vtcr);
2462 SERIALIZE_SCALAR(regs.vdr);
2463 SERIALIZE_SCALAR(regs.ccsr);
2464 SERIALIZE_SCALAR(regs.tbicr);
2465 SERIALIZE_SCALAR(regs.tbisr);
2466 SERIALIZE_SCALAR(regs.tanar);
2467 SERIALIZE_SCALAR(regs.tanlpar);
2468 SERIALIZE_SCALAR(regs.taner);
2469 SERIALIZE_SCALAR(regs.tesr);
2470
2471 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2472 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2473
2474 SERIALIZE_SCALAR(ioEnable);
2475
2476 /*
2477 * Serialize the data Fifos
2478 */
2479 rxFifo.serialize("rxFifo", os);
2480 txFifo.serialize("txFifo", os);
2481
2482 /*
2483 * Serialize the various helper variables
2484 */
2485 bool txPacketExists = txPacket;
2486 SERIALIZE_SCALAR(txPacketExists);
2487 if (txPacketExists) {
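         // The packet may still be under construction, so record how many
         // bytes are valid and where the write pointer sits relative to
         // the start of the buffer.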
2488 txPacket->length = txPacketBufPtr - txPacket->data;
2489 txPacket->serialize("txPacket", os);
2490 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2491 SERIALIZE_SCALAR(txPktBufPtr);
2492 }
2493
2494 bool rxPacketExists = rxPacket;
2495 SERIALIZE_SCALAR(rxPacketExists);
2496 if (rxPacketExists) {
2497 rxPacket->serialize("rxPacket", os);
2498 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2499 SERIALIZE_SCALAR(rxPktBufPtr);
2500 }
2501
2502 SERIALIZE_SCALAR(txXferLen);
2503 SERIALIZE_SCALAR(rxXferLen);
2504
2505 /*
2506 * Serialize Cached Descriptors
2507 */
2508 SERIALIZE_SCALAR(rxDesc64.link);
2509 SERIALIZE_SCALAR(rxDesc64.bufptr);
2510 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2511 SERIALIZE_SCALAR(rxDesc64.extsts);
2512 SERIALIZE_SCALAR(txDesc64.link);
2513 SERIALIZE_SCALAR(txDesc64.bufptr);
2514 SERIALIZE_SCALAR(txDesc64.cmdsts);
2515 SERIALIZE_SCALAR(txDesc64.extsts);
2516 SERIALIZE_SCALAR(rxDesc32.link);
2517 SERIALIZE_SCALAR(rxDesc32.bufptr);
2518 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2519 SERIALIZE_SCALAR(rxDesc32.extsts);
2520 SERIALIZE_SCALAR(txDesc32.link);
2521 SERIALIZE_SCALAR(txDesc32.bufptr);
2522 SERIALIZE_SCALAR(txDesc32.cmdsts);
2523 SERIALIZE_SCALAR(txDesc32.extsts);
2524 SERIALIZE_SCALAR(extstsEnable);
2525
2526 /*
2527 * Serialize tx state machine
2528 */
2529 int txState = this->txState;
2530 SERIALIZE_SCALAR(txState);
2531 SERIALIZE_SCALAR(txEnable);
2532 SERIALIZE_SCALAR(CTDD);
2533 SERIALIZE_SCALAR(txFragPtr);
2534 SERIALIZE_SCALAR(txDescCnt);
2535 int txDmaState = this->txDmaState;
2536 SERIALIZE_SCALAR(txDmaState);
2537 SERIALIZE_SCALAR(txKickTick);
2538
2539 /*
2540 * Serialize rx state machine
2541 */
2542 int rxState = this->rxState;
2543 SERIALIZE_SCALAR(rxState);
2544 SERIALIZE_SCALAR(rxEnable);
2545 SERIALIZE_SCALAR(CRDD);
2546 SERIALIZE_SCALAR(rxPktBytes);
2547 SERIALIZE_SCALAR(rxFragPtr);
2548 SERIALIZE_SCALAR(rxDescCnt);
2549 int rxDmaState = this->rxDmaState;
2550 SERIALIZE_SCALAR(rxDmaState);
2551 SERIALIZE_SCALAR(rxKickTick);
2552
2553 /*
2554 * Serialize EEPROM state machine
2555 */
2556 int eepromState = this->eepromState;
2557 SERIALIZE_SCALAR(eepromState);
2558 SERIALIZE_SCALAR(eepromClk);
2559 SERIALIZE_SCALAR(eepromBitsToRx);
2560 SERIALIZE_SCALAR(eepromOpcode);
2561 SERIALIZE_SCALAR(eepromAddress);
2562 SERIALIZE_SCALAR(eepromData);
2563
2564 /*
2565 * If there's a pending transmit, store the time so we can
2566 * reschedule it later
2567 */
2568 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2569 SERIALIZE_SCALAR(transmitTick);
2570
2571 /*
2572 * receive address filter settings
2573 */
2574 SERIALIZE_SCALAR(rxFilterEnable);
2575 SERIALIZE_SCALAR(acceptBroadcast);
2576 SERIALIZE_SCALAR(acceptMulticast);
2577 SERIALIZE_SCALAR(acceptUnicast);
2578 SERIALIZE_SCALAR(acceptPerfect);
2579 SERIALIZE_SCALAR(acceptArp);
2580 SERIALIZE_SCALAR(multicastHashEnable);
2581
2582 /*
2583 * Keep track of pending interrupt status.
2584 */
2585 SERIALIZE_SCALAR(intrTick);
2586 SERIALIZE_SCALAR(cpuPendingIntr);
2587 Tick intrEventTick = 0;
2588 if (intrEvent)
2589 intrEventTick = intrEvent->when();
2590 SERIALIZE_SCALAR(intrEventTick);
2591
2592 }
2593
2594 void
2595 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2596 {
2597 // Unserialize the PciDev base class
2598 PciDev::unserialize(cp, section);
2599
2600 UNSERIALIZE_SCALAR(regs.command);
2601 UNSERIALIZE_SCALAR(regs.config);
2602 UNSERIALIZE_SCALAR(regs.mear);
2603 UNSERIALIZE_SCALAR(regs.ptscr);
2604 UNSERIALIZE_SCALAR(regs.isr);
2605 UNSERIALIZE_SCALAR(regs.imr);
2606 UNSERIALIZE_SCALAR(regs.ier);
2607 UNSERIALIZE_SCALAR(regs.ihr);
2608 UNSERIALIZE_SCALAR(regs.txdp);
2609 UNSERIALIZE_SCALAR(regs.txdp_hi);
2610 UNSERIALIZE_SCALAR(regs.txcfg);
2611 UNSERIALIZE_SCALAR(regs.gpior);
2612 UNSERIALIZE_SCALAR(regs.rxdp);
2613 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2614 UNSERIALIZE_SCALAR(regs.rxcfg);
2615 UNSERIALIZE_SCALAR(regs.pqcr);
2616 UNSERIALIZE_SCALAR(regs.wcsr);
2617 UNSERIALIZE_SCALAR(regs.pcr);
2618 UNSERIALIZE_SCALAR(regs.rfcr);
2619 UNSERIALIZE_SCALAR(regs.rfdr);
2620 UNSERIALIZE_SCALAR(regs.brar);
2621 UNSERIALIZE_SCALAR(regs.brdr);
2622 UNSERIALIZE_SCALAR(regs.srr);
2623 UNSERIALIZE_SCALAR(regs.mibc);
2624 UNSERIALIZE_SCALAR(regs.vrcr);
2625 UNSERIALIZE_SCALAR(regs.vtcr);
2626 UNSERIALIZE_SCALAR(regs.vdr);
2627 UNSERIALIZE_SCALAR(regs.ccsr);
2628 UNSERIALIZE_SCALAR(regs.tbicr);
2629 UNSERIALIZE_SCALAR(regs.tbisr);
2630 UNSERIALIZE_SCALAR(regs.tanar);
2631 UNSERIALIZE_SCALAR(regs.tanlpar);
2632 UNSERIALIZE_SCALAR(regs.taner);
2633 UNSERIALIZE_SCALAR(regs.tesr);
2634
2635 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2636 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2637
2638 UNSERIALIZE_SCALAR(ioEnable);
2639
2640 /*
2641 * unserialize the data fifos
2642 */
2643 rxFifo.unserialize("rxFifo", cp, section);
2644 txFifo.unserialize("txFifo", cp, section);
2645
2646 /*
2647 * unserialize the various helper variables
2648 */
2649 bool txPacketExists;
2650 UNSERIALIZE_SCALAR(txPacketExists);
2651 if (txPacketExists) {
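         // Rebuild the in-flight transmit packet: allocate a fresh 16 KB
         // buffer, restore its contents, and recompute the write pointer
         // from the saved offset.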
2652 txPacket = new EthPacketData(16384);
2653 txPacket->unserialize("txPacket", cp, section);
2654 uint32_t txPktBufPtr;
2655 UNSERIALIZE_SCALAR(txPktBufPtr);
2656 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2657 } else
2658 txPacket = 0;
2659
2660 bool rxPacketExists;
2661 UNSERIALIZE_SCALAR(rxPacketExists);
2663 if (rxPacketExists) {
2664 rxPacket = new EthPacketData(16384);
2665 rxPacket->unserialize("rxPacket", cp, section);
2666 uint32_t rxPktBufPtr;
2667 UNSERIALIZE_SCALAR(rxPktBufPtr);
2668 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2669 } else
2670 rxPacket = 0;
2671
2672 UNSERIALIZE_SCALAR(txXferLen);
2673 UNSERIALIZE_SCALAR(rxXferLen);
2674
2675 /*
2676 * Unserialize Cached Descriptors
2677 */
2678 UNSERIALIZE_SCALAR(rxDesc64.link);
2679 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2680 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2681 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2682 UNSERIALIZE_SCALAR(txDesc64.link);
2683 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2684 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2685 UNSERIALIZE_SCALAR(txDesc64.extsts);
2686 UNSERIALIZE_SCALAR(rxDesc32.link);
2687 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2688 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2689 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2690 UNSERIALIZE_SCALAR(txDesc32.link);
2691 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2692 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2693 UNSERIALIZE_SCALAR(txDesc32.extsts);
2694 UNSERIALIZE_SCALAR(extstsEnable);
2695
2696 /*
2697 * unserialize tx state machine
2698 */
2699 int txState;
2700 UNSERIALIZE_SCALAR(txState);
2701 this->txState = (TxState) txState;
2702 UNSERIALIZE_SCALAR(txEnable);
2703 UNSERIALIZE_SCALAR(CTDD);
2704 UNSERIALIZE_SCALAR(txFragPtr);
2705 UNSERIALIZE_SCALAR(txDescCnt);
2706 int txDmaState;
2707 UNSERIALIZE_SCALAR(txDmaState);
2708 this->txDmaState = (DmaState) txDmaState;
2709 UNSERIALIZE_SCALAR(txKickTick);
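     // If the TX state machine had a kick pending at checkpoint time,
     // restart its kick event.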
2710 if (txKickTick)
2711 txKickEvent.schedule(txKickTick);
2712
2713 /*
2714 * unserialize rx state machine
2715 */
2716 int rxState;
2717 UNSERIALIZE_SCALAR(rxState);
2718 this->rxState = (RxState) rxState;
2719 UNSERIALIZE_SCALAR(rxEnable);
2720 UNSERIALIZE_SCALAR(CRDD);
2721 UNSERIALIZE_SCALAR(rxPktBytes);
2722 UNSERIALIZE_SCALAR(rxFragPtr);
2723 UNSERIALIZE_SCALAR(rxDescCnt);
2724 int rxDmaState;
2725 UNSERIALIZE_SCALAR(rxDmaState);
2726 this->rxDmaState = (DmaState) rxDmaState;
2727 UNSERIALIZE_SCALAR(rxKickTick);
2728 if (rxKickTick)
2729 rxKickEvent.schedule(rxKickTick);
2730
2731 /*
2732 * Unserialize EEPROM state machine
2733 */
2734 int eepromState;
2735 UNSERIALIZE_SCALAR(eepromState);
2736 this->eepromState = (EEPROMState) eepromState;
2737 UNSERIALIZE_SCALAR(eepromClk);
2738 UNSERIALIZE_SCALAR(eepromBitsToRx);
2739 UNSERIALIZE_SCALAR(eepromOpcode);
2740 UNSERIALIZE_SCALAR(eepromAddress);
2741 UNSERIALIZE_SCALAR(eepromData);
2742
2743 /*
2744 * If there's a pending transmit, reschedule it now
2745 */
2746 Tick transmitTick;
2747 UNSERIALIZE_SCALAR(transmitTick);
2748 if (transmitTick)
2749 txEvent.schedule(curTick + transmitTick);
2750
2751 /*
2752 * unserialize receive address filter settings
2753 */
2754 UNSERIALIZE_SCALAR(rxFilterEnable);
2755 UNSERIALIZE_SCALAR(acceptBroadcast);
2756 UNSERIALIZE_SCALAR(acceptMulticast);
2757 UNSERIALIZE_SCALAR(acceptUnicast);
2758 UNSERIALIZE_SCALAR(acceptPerfect);
2759 UNSERIALIZE_SCALAR(acceptArp);
2760 UNSERIALIZE_SCALAR(multicastHashEnable);
2761
2762 /*
2763 * Keep track of pending interrupt status.
2764 */
2765 UNSERIALIZE_SCALAR(intrTick);
2766 UNSERIALIZE_SCALAR(cpuPendingIntr);
2767 Tick intrEventTick;
2768 UNSERIALIZE_SCALAR(intrEventTick);
2769 if (intrEventTick) {
2770 intrEvent = new IntrEvent(this, true);
2771 intrEvent->schedule(intrEventTick);
2772 }
2773 }
2774
2775 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2776
2777 SimObjectParam<EtherInt *> peer;
2778 SimObjectParam<NSGigE *> device;
2779
2780 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2781
2782 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2783
2784 INIT_PARAM_DFLT(peer, "peer interface", NULL),
2785 INIT_PARAM(device, "Ethernet device of this interface")
2786
2787 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2788
2789 CREATE_SIM_OBJECT(NSGigEInt)
2790 {
2791 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2792
2793 EtherInt *p = (EtherInt *)peer;
2794 if (p) {
2795 dev_int->setPeer(p);
2796 p->setPeer(dev_int);
2797 }
2798
2799 return dev_int;
2800 }
2801
2802 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2803
2804
2805 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2806
2807 SimObjectParam<System *> system;
2808 SimObjectParam<Platform *> platform;
2809 SimObjectParam<PciConfigAll *> configspace;
2810 SimObjectParam<PciConfigData *> configdata;
2811 Param<uint32_t> pci_bus;
2812 Param<uint32_t> pci_dev;
2813 Param<uint32_t> pci_func;
2814 Param<Tick> pio_latency;
2815
2816 Param<Tick> clock;
2817 Param<bool> dma_desc_free;
2818 Param<bool> dma_data_free;
2819 Param<Tick> dma_read_delay;
2820 Param<Tick> dma_write_delay;
2821 Param<Tick> dma_read_factor;
2822 Param<Tick> dma_write_factor;
2823 Param<bool> dma_no_allocate;
2824 Param<Tick> intr_delay;
2825
2826 Param<Tick> rx_delay;
2827 Param<Tick> tx_delay;
2828 Param<uint32_t> rx_fifo_size;
2829 Param<uint32_t> tx_fifo_size;
2830
2831 Param<bool> rx_filter;
2832 Param<string> hardware_address;
2833 Param<bool> rx_thread;
2834 Param<bool> tx_thread;
2835 Param<bool> rss;
2836
2837 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2838
2839 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
2840
2841 INIT_PARAM(system, "System pointer"),
2842 INIT_PARAM(platform, "Platform pointer"),
2843 INIT_PARAM(configspace, "PCI Configspace"),
2844 INIT_PARAM(configdata, "PCI Config data"),
2845 INIT_PARAM(pci_bus, "PCI bus ID"),
2846 INIT_PARAM(pci_dev, "PCI device number"),
2847 INIT_PARAM(pci_func, "PCI function code"),
2848 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
2849 INIT_PARAM(clock, "State machine cycle time"),
2850
2851 INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
2852 INIT_PARAM(dma_data_free, "DMA of Data is free"),
2853 INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
2854 INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
2855 INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
2856 INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
2857 INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
2858 INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
2859
2860 INIT_PARAM(rx_delay, "Receive Delay"),
2861 INIT_PARAM(tx_delay, "Transmit Delay"),
2862 INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
2863 INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),
2864
2865 INIT_PARAM(rx_filter, "Enable Receive Filter"),
2866 INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
2867 INIT_PARAM(rx_thread, ""),
2868 INIT_PARAM(tx_thread, ""),
2869 INIT_PARAM(rss, "")
2870
2871 END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2872
2873
2874 CREATE_SIM_OBJECT(NSGigE)
2875 {
2876 NSGigE::Params *params = new NSGigE::Params;
2877
2878 params->name = getInstanceName();
2879 params->platform = platform;
2880 params->system = system;
2881 params->configSpace = configspace;
2882 params->configData = configdata;
2883 params->busNum = pci_bus;
2884 params->deviceNum = pci_dev;
2885 params->functionNum = pci_func;
2886 params->pio_delay = pio_latency;
2887
2888 params->clock = clock;
2889 params->dma_desc_free = dma_desc_free;
2890 params->dma_data_free = dma_data_free;
2891 params->dma_read_delay = dma_read_delay;
2892 params->dma_write_delay = dma_write_delay;
2893 params->dma_read_factor = dma_read_factor;
2894 params->dma_write_factor = dma_write_factor;
2895 params->dma_no_allocate = dma_no_allocate;
2897 params->intr_delay = intr_delay;
2898
2899 params->rx_delay = rx_delay;
2900 params->tx_delay = tx_delay;
2901 params->rx_fifo_size = rx_fifo_size;
2902 params->tx_fifo_size = tx_fifo_size;
2903
2904 params->rx_filter = rx_filter;
2905 params->eaddr = hardware_address;
2906 params->rx_thread = rx_thread;
2907 params->tx_thread = tx_thread;
2908 params->rss = rss;
2909
2910 return new NSGigE(params);
2911 }
2912
2913 REGISTER_SIM_OBJECT("NSGigE", NSGigE)