// Merge zizzer:/bk/newmem
// [gem5.git] / src / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36 #include <deque>
37 #include <string>
38
39 #include "base/inet.hh"
40 #include "cpu/thread_context.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/packet.hh"
45 #include "mem/packet_access.hh"
46 #include "sim/builder.hh"
47 #include "sim/debug.hh"
48 #include "sim/host.hh"
49 #include "sim/stats.hh"
50 #include "sim/system.hh"
51
// Human-readable names for the receive state machine states, used in
// debug/trace output.  NOTE(review): entries must stay in the same order
// as the corresponding RxState enum values (declared in ns_gige.hh).
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Human-readable names for the transmit state machine states; same
// ordering constraint as above, against the TxState enum.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Human-readable names for the DMA engine states, ordered to match the
// DmaState enum.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
82
83 using namespace std;
84 using namespace Net;
85 using namespace TheISA;
86
87 ///////////////////////////////////////////////////////////////////////
88 //
89 // NSGigE PCI Device
90 //
/**
 * Construct the device model from its parameter set: size the tx/rx
 * FIFOs, latch the DMA/interrupt delay parameters, put both state
 * machines in their idle states, then reset the device registers and
 * seed the perfect-match ROM with the configured MAC address.
 *
 * NOTE(review): the member-initializer list must match the member
 * declaration order in ns_gige.hh (not visible here) to avoid
 * reorder warnings — do not rearrange it without checking the header.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
      clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
      eepromOpcode(0), eepromAddress(0), eepromData(0),
      dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
      dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
      rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
      txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter),
      acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Put the device registers into their power-on state.
    regsReset();
    // The perfect-match ROM holds the station MAC address used by the
    // receive filter (and readable via RFCR/RFDR).
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);

    // Start with all cached descriptor copies zeroed.
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
128
// Nothing to tear down explicitly; members clean up via their own
// destructors.
NSGigE::~NSGigE()
{}
131
132 void
133 NSGigE::regStats()
134 {
135 txBytes
136 .name(name() + ".txBytes")
137 .desc("Bytes Transmitted")
138 .prereq(txBytes)
139 ;
140
141 rxBytes
142 .name(name() + ".rxBytes")
143 .desc("Bytes Received")
144 .prereq(rxBytes)
145 ;
146
147 txPackets
148 .name(name() + ".txPackets")
149 .desc("Number of Packets Transmitted")
150 .prereq(txBytes)
151 ;
152
153 rxPackets
154 .name(name() + ".rxPackets")
155 .desc("Number of Packets Received")
156 .prereq(rxBytes)
157 ;
158
159 txIpChecksums
160 .name(name() + ".txIpChecksums")
161 .desc("Number of tx IP Checksums done by device")
162 .precision(0)
163 .prereq(txBytes)
164 ;
165
166 rxIpChecksums
167 .name(name() + ".rxIpChecksums")
168 .desc("Number of rx IP Checksums done by device")
169 .precision(0)
170 .prereq(rxBytes)
171 ;
172
173 txTcpChecksums
174 .name(name() + ".txTcpChecksums")
175 .desc("Number of tx TCP Checksums done by device")
176 .precision(0)
177 .prereq(txBytes)
178 ;
179
180 rxTcpChecksums
181 .name(name() + ".rxTcpChecksums")
182 .desc("Number of rx TCP Checksums done by device")
183 .precision(0)
184 .prereq(rxBytes)
185 ;
186
187 txUdpChecksums
188 .name(name() + ".txUdpChecksums")
189 .desc("Number of tx UDP Checksums done by device")
190 .precision(0)
191 .prereq(txBytes)
192 ;
193
194 rxUdpChecksums
195 .name(name() + ".rxUdpChecksums")
196 .desc("Number of rx UDP Checksums done by device")
197 .precision(0)
198 .prereq(rxBytes)
199 ;
200
201 descDmaReads
202 .name(name() + ".descDMAReads")
203 .desc("Number of descriptors the device read w/ DMA")
204 .precision(0)
205 ;
206
207 descDmaWrites
208 .name(name() + ".descDMAWrites")
209 .desc("Number of descriptors the device wrote w/ DMA")
210 .precision(0)
211 ;
212
213 descDmaRdBytes
214 .name(name() + ".descDmaReadBytes")
215 .desc("number of descriptor bytes read w/ DMA")
216 .precision(0)
217 ;
218
219 descDmaWrBytes
220 .name(name() + ".descDmaWriteBytes")
221 .desc("number of descriptor bytes write w/ DMA")
222 .precision(0)
223 ;
224
225 txBandwidth
226 .name(name() + ".txBandwidth")
227 .desc("Transmit Bandwidth (bits/s)")
228 .precision(0)
229 .prereq(txBytes)
230 ;
231
232 rxBandwidth
233 .name(name() + ".rxBandwidth")
234 .desc("Receive Bandwidth (bits/s)")
235 .precision(0)
236 .prereq(rxBytes)
237 ;
238
239 totBandwidth
240 .name(name() + ".totBandwidth")
241 .desc("Total Bandwidth (bits/s)")
242 .precision(0)
243 .prereq(totBytes)
244 ;
245
246 totPackets
247 .name(name() + ".totPackets")
248 .desc("Total Packets")
249 .precision(0)
250 .prereq(totBytes)
251 ;
252
253 totBytes
254 .name(name() + ".totBytes")
255 .desc("Total Bytes")
256 .precision(0)
257 .prereq(totBytes)
258 ;
259
260 totPacketRate
261 .name(name() + ".totPPS")
262 .desc("Total Tranmission Rate (packets/s)")
263 .precision(0)
264 .prereq(totBytes)
265 ;
266
267 txPacketRate
268 .name(name() + ".txPPS")
269 .desc("Packet Tranmission Rate (packets/s)")
270 .precision(0)
271 .prereq(txBytes)
272 ;
273
274 rxPacketRate
275 .name(name() + ".rxPPS")
276 .desc("Packet Reception Rate (packets/s)")
277 .precision(0)
278 .prereq(rxBytes)
279 ;
280
281 postedSwi
282 .name(name() + ".postedSwi")
283 .desc("number of software interrupts posted to CPU")
284 .precision(0)
285 ;
286
287 totalSwi
288 .name(name() + ".totalSwi")
289 .desc("total number of Swi written to ISR")
290 .precision(0)
291 ;
292
293 coalescedSwi
294 .name(name() + ".coalescedSwi")
295 .desc("average number of Swi's coalesced into each post")
296 .precision(0)
297 ;
298
299 postedRxIdle
300 .name(name() + ".postedRxIdle")
301 .desc("number of rxIdle interrupts posted to CPU")
302 .precision(0)
303 ;
304
305 totalRxIdle
306 .name(name() + ".totalRxIdle")
307 .desc("total number of RxIdle written to ISR")
308 .precision(0)
309 ;
310
311 coalescedRxIdle
312 .name(name() + ".coalescedRxIdle")
313 .desc("average number of RxIdle's coalesced into each post")
314 .precision(0)
315 ;
316
317 postedRxOk
318 .name(name() + ".postedRxOk")
319 .desc("number of RxOk interrupts posted to CPU")
320 .precision(0)
321 ;
322
323 totalRxOk
324 .name(name() + ".totalRxOk")
325 .desc("total number of RxOk written to ISR")
326 .precision(0)
327 ;
328
329 coalescedRxOk
330 .name(name() + ".coalescedRxOk")
331 .desc("average number of RxOk's coalesced into each post")
332 .precision(0)
333 ;
334
335 postedRxDesc
336 .name(name() + ".postedRxDesc")
337 .desc("number of RxDesc interrupts posted to CPU")
338 .precision(0)
339 ;
340
341 totalRxDesc
342 .name(name() + ".totalRxDesc")
343 .desc("total number of RxDesc written to ISR")
344 .precision(0)
345 ;
346
347 coalescedRxDesc
348 .name(name() + ".coalescedRxDesc")
349 .desc("average number of RxDesc's coalesced into each post")
350 .precision(0)
351 ;
352
353 postedTxOk
354 .name(name() + ".postedTxOk")
355 .desc("number of TxOk interrupts posted to CPU")
356 .precision(0)
357 ;
358
359 totalTxOk
360 .name(name() + ".totalTxOk")
361 .desc("total number of TxOk written to ISR")
362 .precision(0)
363 ;
364
365 coalescedTxOk
366 .name(name() + ".coalescedTxOk")
367 .desc("average number of TxOk's coalesced into each post")
368 .precision(0)
369 ;
370
371 postedTxIdle
372 .name(name() + ".postedTxIdle")
373 .desc("number of TxIdle interrupts posted to CPU")
374 .precision(0)
375 ;
376
377 totalTxIdle
378 .name(name() + ".totalTxIdle")
379 .desc("total number of TxIdle written to ISR")
380 .precision(0)
381 ;
382
383 coalescedTxIdle
384 .name(name() + ".coalescedTxIdle")
385 .desc("average number of TxIdle's coalesced into each post")
386 .precision(0)
387 ;
388
389 postedTxDesc
390 .name(name() + ".postedTxDesc")
391 .desc("number of TxDesc interrupts posted to CPU")
392 .precision(0)
393 ;
394
395 totalTxDesc
396 .name(name() + ".totalTxDesc")
397 .desc("total number of TxDesc written to ISR")
398 .precision(0)
399 ;
400
401 coalescedTxDesc
402 .name(name() + ".coalescedTxDesc")
403 .desc("average number of TxDesc's coalesced into each post")
404 .precision(0)
405 ;
406
407 postedRxOrn
408 .name(name() + ".postedRxOrn")
409 .desc("number of RxOrn posted to CPU")
410 .precision(0)
411 ;
412
413 totalRxOrn
414 .name(name() + ".totalRxOrn")
415 .desc("total number of RxOrn written to ISR")
416 .precision(0)
417 ;
418
419 coalescedRxOrn
420 .name(name() + ".coalescedRxOrn")
421 .desc("average number of RxOrn's coalesced into each post")
422 .precision(0)
423 ;
424
425 coalescedTotal
426 .name(name() + ".coalescedTotal")
427 .desc("average number of interrupts coalesced into each post")
428 .precision(0)
429 ;
430
431 postedInterrupts
432 .name(name() + ".postedInterrupts")
433 .desc("number of posts to CPU")
434 .precision(0)
435 ;
436
437 droppedPackets
438 .name(name() + ".droppedPackets")
439 .desc("number of packets dropped")
440 .precision(0)
441 ;
442
443 coalescedSwi = totalSwi / postedInterrupts;
444 coalescedRxIdle = totalRxIdle / postedInterrupts;
445 coalescedRxOk = totalRxOk / postedInterrupts;
446 coalescedRxDesc = totalRxDesc / postedInterrupts;
447 coalescedTxOk = totalTxOk / postedInterrupts;
448 coalescedTxIdle = totalTxIdle / postedInterrupts;
449 coalescedTxDesc = totalTxDesc / postedInterrupts;
450 coalescedRxOrn = totalRxOrn / postedInterrupts;
451
452 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
453 totalTxOk + totalTxIdle + totalTxDesc +
454 totalRxOrn) / postedInterrupts;
455
456 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
457 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
458 totBandwidth = txBandwidth + rxBandwidth;
459 totBytes = txBytes + rxBytes;
460 totPackets = txPackets + rxPackets;
461
462 txPacketRate = txPackets / simSeconds;
463 rxPacketRate = rxPackets / simSeconds;
464 }
465
466
467 /**
468 * This is to write to the PCI general configuration registers
469 */
470 Tick
471 NSGigE::writeConfig(PacketPtr pkt)
472 {
473 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
474 if (offset < PCI_DEVICE_SPECIFIC)
475 PciDev::writeConfig(pkt);
476 else
477 panic("Device specific PCI config space not implemented!\n");
478
479 switch (offset) {
480 // seems to work fine without all these PCI settings, but i
481 // put in the IO to double check, an assertion will fail if we
482 // need to properly implement it
483 case PCI_COMMAND:
484 if (config.data[offset] & PCI_CMD_IOSE)
485 ioEnable = true;
486 else
487 ioEnable = false;
488 break;
489 }
490 pkt->result = Packet::Success;
491 return configDelay;
492 }
493
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * Decodes the low 12 bits of the packet address as a register offset,
 * panics on reserved offsets, returns 0 for the MIB counter range, and
 * otherwise services a 32-bit register read.  Some reads have side
 * effects (CR clears its command bits, ISR clears all interrupt
 * causes, MIBC clears its self-clearing bits) as the datasheet
 * specifies.
 */
Tick
NSGigE::read(PacketPtr pkt)
{
    assert(ioEnable);

    pkt->allocate();

    //The mask is to give you only the offset into the device register file
    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // NOTE(review): this region is forwarded to config-space reads;
        // confirm against ns_gige_reg.h that 0x200-0x3FC aliases config.
        return readConfig(pkt);
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        pkt->set<uint32_t>(0);
        pkt->result = Packet::Success;
        return pioDelay;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    // All remaining register reads must be full 32-bit accesses; the
    // result is written directly into the packet's data buffer.
    assert(pkt->getSize() == sizeof(uint32_t));
    uint32_t &reg = *pkt->getPtr<uint32_t>();
    uint16_t rfaddr;

    switch (daddr) {
      case CR:
        reg = regs.command;
        //these are supposed to be cleared on a read
        reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
        break;

      case CFGR:
        reg = regs.config;
        break;

      case MEAR:
        reg = regs.mear;
        break;

      case PTSCR:
        reg = regs.ptscr;
        break;

      case ISR:
        // Reading ISR returns the pending causes and clears them all.
        reg = regs.isr;
        devIntrClear(ISR_ALL);
        break;

      case IMR:
        reg = regs.imr;
        break;

      case IER:
        reg = regs.ier;
        break;

      case IHR:
        reg = regs.ihr;
        break;

      case TXDP:
        reg = regs.txdp;
        break;

      case TXDP_HI:
        reg = regs.txdp_hi;
        break;

      case TX_CFG:
        reg = regs.txcfg;
        break;

      case GPIOR:
        reg = regs.gpior;
        break;

      case RXDP:
        reg = regs.rxdp;
        break;

      case RXDP_HI:
        reg = regs.rxdp_hi;
        break;

      case RX_CFG:
        reg = regs.rxcfg;
        break;

      case PQCR:
        reg = regs.pqcr;
        break;

      case WCSR:
        reg = regs.wcsr;
        break;

      case PCR:
        reg = regs.pcr;
        break;

      // see the spec sheet for how RFCR and RFDR work
      // basically, you write to RFCR to tell the machine
      // what you want to do next, then you act upon RFDR,
      // and the device will be prepared b/c of what you
      // wrote to RFCR
      case RFCR:
        reg = regs.rfcr;
        break;

      case RFDR:
        // RFCR_RFADDR selects which filter word RFDR exposes:
        // 0x000/0x002/0x004 index the perfect-match (MAC address) ROM
        // as little-endian 16-bit words; FHASH_ADDR.. selects the
        // multicast hash table.
        rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
        switch (rfaddr) {
          // Read from perfect match ROM octets
          case 0x000:
            reg = rom.perfectMatch[1];
            reg = reg << 8;
            reg += rom.perfectMatch[0];
            break;
          case 0x002:
            reg = rom.perfectMatch[3] << 8;
            reg += rom.perfectMatch[2];
            break;
          case 0x004:
            reg = rom.perfectMatch[5] << 8;
            reg += rom.perfectMatch[4];
            break;
          default:
            // Read filter hash table
            if (rfaddr >= FHASH_ADDR &&
                rfaddr < FHASH_ADDR + FHASH_SIZE) {

                // Only word-aligned reads supported
                if (rfaddr % 2)
                    panic("unaligned read from filter hash table!");

                reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                reg += rom.filterHash[rfaddr - FHASH_ADDR];
                break;
            }

            panic("reading RFDR for something other than pattern"
                  " matching or hashing! %#x\n", rfaddr);
        }
        break;

      case SRR:
        reg = regs.srr;
        break;

      case MIBC:
        reg = regs.mibc;
        // MIBS and ACLR are self-clearing, so reads see them as 0.
        reg &= ~(MIBC_MIBS | MIBC_ACLR);
        break;

      case VRCR:
        reg = regs.vrcr;
        break;

      case VTCR:
        reg = regs.vtcr;
        break;

      case VDR:
        reg = regs.vdr;
        break;

      case CCSR:
        reg = regs.ccsr;
        break;

      case TBICR:
        reg = regs.tbicr;
        break;

      case TBISR:
        reg = regs.tbisr;
        break;

      case TANAR:
        reg = regs.tanar;
        break;

      case TANLPAR:
        reg = regs.tanlpar;
        break;

      case TANER:
        reg = regs.taner;
        break;

      case TESR:
        reg = regs.tesr;
        break;

      case M5REG:
        // Simulator-private register: reports which acceleration
        // features (rx/tx thread, RSS) were enabled via parameters.
        reg = 0;
        if (params()->rx_thread)
            reg |= M5REG_RX_THREAD;
        if (params()->tx_thread)
            reg |= M5REG_TX_THREAD;
        if (params()->rss)
            reg |= M5REG_RSS;
        break;

      default:
        panic("reading unimplemented register: addr=%#x", daddr);
    }

    DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
            daddr, reg, reg);

    pkt->result = Packet::Success;
    return pioDelay;
}
721
722 Tick
723 NSGigE::write(PacketPtr pkt)
724 {
725 assert(ioEnable);
726
727 Addr daddr = pkt->getAddr() & 0xfff;
728 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
729 daddr, pkt->getAddr(), pkt->getSize());
730
731 if (daddr > LAST && daddr <= RESERVED) {
732 panic("Accessing reserved register");
733 } else if (daddr > RESERVED && daddr <= 0x3FC) {
734 return writeConfig(pkt);
735 } else if (daddr > 0x3FC)
736 panic("Something is messed up!\n");
737
738 if (pkt->getSize() == sizeof(uint32_t)) {
739 uint32_t reg = pkt->get<uint32_t>();
740 uint16_t rfaddr;
741
742 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
743
744 switch (daddr) {
745 case CR:
746 regs.command = reg;
747 if (reg & CR_TXD) {
748 txEnable = false;
749 } else if (reg & CR_TXE) {
750 txEnable = true;
751
752 // the kernel is enabling the transmit machine
753 if (txState == txIdle)
754 txKick();
755 }
756
757 if (reg & CR_RXD) {
758 rxEnable = false;
759 } else if (reg & CR_RXE) {
760 rxEnable = true;
761
762 if (rxState == rxIdle)
763 rxKick();
764 }
765
766 if (reg & CR_TXR)
767 txReset();
768
769 if (reg & CR_RXR)
770 rxReset();
771
772 if (reg & CR_SWI)
773 devIntrPost(ISR_SWI);
774
775 if (reg & CR_RST) {
776 txReset();
777 rxReset();
778
779 regsReset();
780 }
781 break;
782
783 case CFGR:
784 if (reg & CFGR_LNKSTS ||
785 reg & CFGR_SPDSTS ||
786 reg & CFGR_DUPSTS ||
787 reg & CFGR_RESERVED ||
788 reg & CFGR_T64ADDR ||
789 reg & CFGR_PCI64_DET)
790
791 // First clear all writable bits
792 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
793 CFGR_RESERVED | CFGR_T64ADDR |
794 CFGR_PCI64_DET;
795 // Now set the appropriate writable bits
796 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
797 CFGR_RESERVED | CFGR_T64ADDR |
798 CFGR_PCI64_DET);
799
800 // all these #if 0's are because i don't THINK the kernel needs to
801 // have these implemented. if there is a problem relating to one of
802 // these, you may need to add functionality in.
803 if (reg & CFGR_TBI_EN) ;
804 if (reg & CFGR_MODE_1000) ;
805
806 if (reg & CFGR_AUTO_1000)
807 panic("CFGR_AUTO_1000 not implemented!\n");
808
809 if (reg & CFGR_PINT_DUPSTS ||
810 reg & CFGR_PINT_LNKSTS ||
811 reg & CFGR_PINT_SPDSTS)
812 ;
813
814 if (reg & CFGR_TMRTEST) ;
815 if (reg & CFGR_MRM_DIS) ;
816 if (reg & CFGR_MWI_DIS) ;
817
818 if (reg & CFGR_T64ADDR) ;
819 // panic("CFGR_T64ADDR is read only register!\n");
820
821 if (reg & CFGR_PCI64_DET)
822 panic("CFGR_PCI64_DET is read only register!\n");
823
824 if (reg & CFGR_DATA64_EN) ;
825 if (reg & CFGR_M64ADDR) ;
826 if (reg & CFGR_PHY_RST) ;
827 if (reg & CFGR_PHY_DIS) ;
828
829 if (reg & CFGR_EXTSTS_EN)
830 extstsEnable = true;
831 else
832 extstsEnable = false;
833
834 if (reg & CFGR_REQALG) ;
835 if (reg & CFGR_SB) ;
836 if (reg & CFGR_POW) ;
837 if (reg & CFGR_EXD) ;
838 if (reg & CFGR_PESEL) ;
839 if (reg & CFGR_BROM_DIS) ;
840 if (reg & CFGR_EXT_125) ;
841 if (reg & CFGR_BEM) ;
842 break;
843
844 case MEAR:
845 // Clear writable bits
846 regs.mear &= MEAR_EEDO;
847 // Set appropriate writable bits
848 regs.mear |= reg & ~MEAR_EEDO;
849
850 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
851 // even though it could get it through RFDR
852 if (reg & MEAR_EESEL) {
853 // Rising edge of clock
854 if (reg & MEAR_EECLK && !eepromClk)
855 eepromKick();
856 }
857 else {
858 eepromState = eepromStart;
859 regs.mear &= ~MEAR_EEDI;
860 }
861
862 eepromClk = reg & MEAR_EECLK;
863
864 // since phy is completely faked, MEAR_MD* don't matter
865 if (reg & MEAR_MDIO) ;
866 if (reg & MEAR_MDDIR) ;
867 if (reg & MEAR_MDC) ;
868 break;
869
870 case PTSCR:
871 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
872 // these control BISTs for various parts of chip - we
873 // don't care or do just fake that the BIST is done
874 if (reg & PTSCR_RBIST_EN)
875 regs.ptscr |= PTSCR_RBIST_DONE;
876 if (reg & PTSCR_EEBIST_EN)
877 regs.ptscr &= ~PTSCR_EEBIST_EN;
878 if (reg & PTSCR_EELOAD_EN)
879 regs.ptscr &= ~PTSCR_EELOAD_EN;
880 break;
881
882 case ISR: /* writing to the ISR has no effect */
883 panic("ISR is a read only register!\n");
884
885 case IMR:
886 regs.imr = reg;
887 devIntrChangeMask();
888 break;
889
890 case IER:
891 regs.ier = reg;
892 break;
893
894 case IHR:
895 regs.ihr = reg;
896 /* not going to implement real interrupt holdoff */
897 break;
898
899 case TXDP:
900 regs.txdp = (reg & 0xFFFFFFFC);
901 assert(txState == txIdle);
902 CTDD = false;
903 break;
904
905 case TXDP_HI:
906 regs.txdp_hi = reg;
907 break;
908
909 case TX_CFG:
910 regs.txcfg = reg;
911 #if 0
912 if (reg & TX_CFG_CSI) ;
913 if (reg & TX_CFG_HBI) ;
914 if (reg & TX_CFG_MLB) ;
915 if (reg & TX_CFG_ATP) ;
916 if (reg & TX_CFG_ECRETRY) {
917 /*
918 * this could easily be implemented, but considering
919 * the network is just a fake pipe, wouldn't make
920 * sense to do this
921 */
922 }
923
924 if (reg & TX_CFG_BRST_DIS) ;
925 #endif
926
927 #if 0
928 /* we handle our own DMA, ignore the kernel's exhortations */
929 if (reg & TX_CFG_MXDMA) ;
930 #endif
931
932 // also, we currently don't care about fill/drain
933 // thresholds though this may change in the future with
934 // more realistic networks or a driver which changes it
935 // according to feedback
936
937 break;
938
939 case GPIOR:
940 // Only write writable bits
941 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
942 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
943 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
944 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
945 /* these just control general purpose i/o pins, don't matter */
946 break;
947
948 case RXDP:
949 regs.rxdp = reg;
950 CRDD = false;
951 break;
952
953 case RXDP_HI:
954 regs.rxdp_hi = reg;
955 break;
956
957 case RX_CFG:
958 regs.rxcfg = reg;
959 #if 0
960 if (reg & RX_CFG_AEP) ;
961 if (reg & RX_CFG_ARP) ;
962 if (reg & RX_CFG_STRIPCRC) ;
963 if (reg & RX_CFG_RX_RD) ;
964 if (reg & RX_CFG_ALP) ;
965 if (reg & RX_CFG_AIRL) ;
966
967 /* we handle our own DMA, ignore what kernel says about it */
968 if (reg & RX_CFG_MXDMA) ;
969
970 //also, we currently don't care about fill/drain thresholds
971 //though this may change in the future with more realistic
972 //networks or a driver which changes it according to feedback
973 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
974 #endif
975 break;
976
977 case PQCR:
978 /* there is no priority queueing used in the linux 2.6 driver */
979 regs.pqcr = reg;
980 break;
981
982 case WCSR:
983 /* not going to implement wake on LAN */
984 regs.wcsr = reg;
985 break;
986
987 case PCR:
988 /* not going to implement pause control */
989 regs.pcr = reg;
990 break;
991
992 case RFCR:
993 regs.rfcr = reg;
994
995 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
996 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
997 acceptMulticast = (reg & RFCR_AAM) ? true : false;
998 acceptUnicast = (reg & RFCR_AAU) ? true : false;
999 acceptPerfect = (reg & RFCR_APM) ? true : false;
1000 acceptArp = (reg & RFCR_AARP) ? true : false;
1001 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1002
1003 #if 0
1004 if (reg & RFCR_APAT)
1005 panic("RFCR_APAT not implemented!\n");
1006 #endif
1007 if (reg & RFCR_UHEN)
1008 panic("Unicast hash filtering not used by drivers!\n");
1009
1010 if (reg & RFCR_ULM)
1011 panic("RFCR_ULM not implemented!\n");
1012
1013 break;
1014
1015 case RFDR:
1016 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1017 switch (rfaddr) {
1018 case 0x000:
1019 rom.perfectMatch[0] = (uint8_t)reg;
1020 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1021 break;
1022 case 0x002:
1023 rom.perfectMatch[2] = (uint8_t)reg;
1024 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1025 break;
1026 case 0x004:
1027 rom.perfectMatch[4] = (uint8_t)reg;
1028 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1029 break;
1030 default:
1031
1032 if (rfaddr >= FHASH_ADDR &&
1033 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1034
1035 // Only word-aligned writes supported
1036 if (rfaddr % 2)
1037 panic("unaligned write to filter hash table!");
1038
1039 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1040 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1041 = (uint8_t)(reg >> 8);
1042 break;
1043 }
1044 panic("writing RFDR for something other than pattern matching\
1045 or hashing! %#x\n", rfaddr);
1046 }
1047
1048 case BRAR:
1049 regs.brar = reg;
1050 break;
1051
1052 case BRDR:
1053 panic("the driver never uses BRDR, something is wrong!\n");
1054
1055 case SRR:
1056 panic("SRR is read only register!\n");
1057
1058 case MIBC:
1059 panic("the driver never uses MIBC, something is wrong!\n");
1060
1061 case VRCR:
1062 regs.vrcr = reg;
1063 break;
1064
1065 case VTCR:
1066 regs.vtcr = reg;
1067 break;
1068
1069 case VDR:
1070 panic("the driver never uses VDR, something is wrong!\n");
1071
1072 case CCSR:
1073 /* not going to implement clockrun stuff */
1074 regs.ccsr = reg;
1075 break;
1076
1077 case TBICR:
1078 regs.tbicr = reg;
1079 if (reg & TBICR_MR_LOOPBACK)
1080 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1081
1082 if (reg & TBICR_MR_AN_ENABLE) {
1083 regs.tanlpar = regs.tanar;
1084 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1085 }
1086
1087 #if 0
1088 if (reg & TBICR_MR_RESTART_AN) ;
1089 #endif
1090
1091 break;
1092
1093 case TBISR:
1094 panic("TBISR is read only register!\n");
1095
1096 case TANAR:
1097 // Only write the writable bits
1098 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1099 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1100
1101 // Pause capability unimplemented
1102 #if 0
1103 if (reg & TANAR_PS2) ;
1104 if (reg & TANAR_PS1) ;
1105 #endif
1106
1107 break;
1108
1109 case TANLPAR:
1110 panic("this should only be written to by the fake phy!\n");
1111
1112 case TANER:
1113 panic("TANER is read only register!\n");
1114
1115 case TESR:
1116 regs.tesr = reg;
1117 break;
1118
1119 default:
1120 panic("invalid register access daddr=%#x", daddr);
1121 }
1122 } else {
1123 panic("Invalid Request Size");
1124 }
1125 pkt->result = Packet::Success;
1126 return pioDelay;
1127 }
1128
/**
 * Raise the given interrupt cause bits in the ISR and, if any pending
 * cause is unmasked, schedule a CPU interrupt post (delayed by
 * intrDelay unless one of the ISR_NODELAY causes is active).  Also
 * bumps the per-cause "total*" counters used by the interrupt
 * coalescing statistics.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    // Only implemented causes are latched into the ISR.
    interrupts &= ISR_IMPL;
    regs.isr |= interrupts;

    // Count each newly raised, unmasked cause for the coalescing stats.
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        // ISR_NODELAY causes bypass the configured interrupt delay.
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1179
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing.  just telling you in case you were thinking
   of expanding use.
*/
/**
 * Clear the given cause bits from the ISR and retract the CPU
 * interrupt if nothing unmasked remains pending.  Before clearing,
 * each unmasked pending cause is counted as "posted" (delivered to
 * the CPU) for the coalescing statistics — hence the restriction in
 * the comment above.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Account every unmasked cause that was pending at service time.
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    // Any unmasked implemented cause pending counts as one CPU post.
    if (regs.isr & regs.imr & ISR_IMPL)
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // Retract the interrupt line if nothing unmasked is still pending.
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1229
1230 void
1231 NSGigE::devIntrChangeMask()
1232 {
1233 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1234 regs.isr, regs.imr, regs.isr & regs.imr);
1235
1236 if (regs.isr & regs.imr)
1237 cpuIntrPost(curTick);
1238 else
1239 cpuIntrClear();
1240 }
1241
/**
 * Schedule delivery of the device interrupt to the CPU at time @p when,
 * coalescing with any earlier-scheduled delivery.  If a delivery is
 * already scheduled for an earlier tick, this post simply rides along
 * with it; otherwise any later-scheduled event is squashed and
 * replaced.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp: should be unreachable given the asserts above,
    // but drop into the debugger if a stale intrTick ever slips past.
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any already-scheduled (later) delivery with this one.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1276
1277 void
1278 NSGigE::cpuInterrupt()
1279 {
1280 assert(intrTick == curTick);
1281
1282 // Whether or not there's a pending interrupt, we don't care about
1283 // it anymore
1284 intrEvent = 0;
1285 intrTick = 0;
1286
1287 // Don't send an interrupt if there's already one
1288 if (cpuPendingIntr) {
1289 DPRINTF(EthernetIntr,
1290 "would send an interrupt now, but there's already pending\n");
1291 } else {
1292 // Send interrupt
1293 cpuPendingIntr = true;
1294
1295 DPRINTF(EthernetIntr, "posting interrupt\n");
1296 intrPost();
1297 }
1298 }
1299
1300 void
1301 NSGigE::cpuIntrClear()
1302 {
1303 if (!cpuPendingIntr)
1304 return;
1305
1306 if (intrEvent) {
1307 intrEvent->squash();
1308 intrEvent = 0;
1309 }
1310
1311 intrTick = 0;
1312
1313 cpuPendingIntr = false;
1314
1315 DPRINTF(EthernetIntr, "clearing interrupt\n");
1316 intrClear();
1317 }
1318
1319 bool
1320 NSGigE::cpuIntrPending() const
1321 { return cpuPendingIntr; }
1322
1323 void
1324 NSGigE::txReset()
1325 {
1326
1327 DPRINTF(Ethernet, "transmit reset\n");
1328
1329 CTDD = false;
1330 txEnable = false;;
1331 txFragPtr = 0;
1332 assert(txDescCnt == 0);
1333 txFifo.clear();
1334 txState = txIdle;
1335 assert(txDmaState == dmaIdle);
1336 }
1337
1338 void
1339 NSGigE::rxReset()
1340 {
1341 DPRINTF(Ethernet, "receive reset\n");
1342
1343 CRDD = false;
1344 assert(rxPktBytes == 0);
1345 rxEnable = false;
1346 rxFragPtr = 0;
1347 assert(rxDescCnt == 0);
1348 assert(rxDmaState == dmaIdle);
1349 rxFifo.clear();
1350 rxState = rxIdle;
1351 }
1352
/**
 * Reset all device registers to their power-on default values
 * (values per the DP83820 register descriptions), and clear the
 * derived receive-filter / extended-status configuration state.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    // link up, TBI mode, gigabit
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    // Software-visible filter state derived from the (now zeroed) regs.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1375
1376 bool
1377 NSGigE::doRxDmaRead()
1378 {
1379 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1380 rxDmaState = dmaReading;
1381
1382 if (dmaPending() || getState() != Running)
1383 rxDmaState = dmaReadWaiting;
1384 else
1385 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1386
1387 return true;
1388 }
1389
1390 void
1391 NSGigE::rxDmaReadDone()
1392 {
1393 assert(rxDmaState == dmaReading);
1394 rxDmaState = dmaIdle;
1395
1396 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1397 rxDmaAddr, rxDmaLen);
1398 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1399
1400 // If the transmit state machine has a pending DMA, let it go first
1401 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1402 txKick();
1403
1404 rxKick();
1405 }
1406
1407 bool
1408 NSGigE::doRxDmaWrite()
1409 {
1410 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1411 rxDmaState = dmaWriting;
1412
1413 if (dmaPending() || getState() != Running)
1414 rxDmaState = dmaWriteWaiting;
1415 else
1416 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1417 return true;
1418 }
1419
1420 void
1421 NSGigE::rxDmaWriteDone()
1422 {
1423 assert(rxDmaState == dmaWriting);
1424 rxDmaState = dmaIdle;
1425
1426 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1427 rxDmaAddr, rxDmaLen);
1428 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1429
1430 // If the transmit state machine has a pending DMA, let it go first
1431 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1432 txKick();
1433
1434 rxKick();
1435 }
1436
/**
 * Advance the receive state machine as far as it can go.
 *
 * The machine loops via the "next" label; any state that must wait on
 * an in-flight DMA (or the device clock) jumps to "exit".  When the
 * DMA completes, its callback re-enters this function and the machine
 * resumes where it left off.  Descriptors are read/written in either
 * 32- or 64-bit format depending on CFGR_M64ADDR.
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    Addr link, bufptr;
    // References into whichever cached descriptor image is active.
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        // Model a clocked state machine: only one transition per cycle.
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was deferred because the engine was busy.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already consumed: refresh only its
            // link field to find the next descriptor.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor pointed to by RXDP.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-field refresh DMA to complete.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        // Wait for the full descriptor read DMA to complete.
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by the device: driver has no free
            // buffer for us; go idle and tell it.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // More payload to DMA into the descriptor's buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet delivered: write back descriptor status.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Fill in extended status with checksum-offload results.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // cmdsts and extsts are adjacent in the descriptor, so one
            // DMA write covers both.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // Wait for the payload DMA, then account for the bytes moved.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // Wait for the status writeback, then raise completion interrupts.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // Follow the link field to the next descriptor in the ring.
        if (link == 0) {
            // End of the descriptor list; remember the current one is
            // done (CRDD) so it is refreshed when the driver adds more.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    // When clocked, resume the state machine on its next cycle.
    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1773
/**
 * Try to send the packet at the head of the transmit FIFO out the
 * attached EtherInt.  On success the packet is popped, stats are
 * updated, and ISR_TXOK is posted.  If the link is busy, a retry is
 * scheduled retryTime ticks from now.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt.  but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // Link was busy (or more packets remain): try again later.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}
1824
1825 bool
1826 NSGigE::doTxDmaRead()
1827 {
1828 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1829 txDmaState = dmaReading;
1830
1831 if (dmaPending() || getState() != Running)
1832 txDmaState = dmaReadWaiting;
1833 else
1834 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1835
1836 return true;
1837 }
1838
1839 void
1840 NSGigE::txDmaReadDone()
1841 {
1842 assert(txDmaState == dmaReading);
1843 txDmaState = dmaIdle;
1844
1845 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1846 txDmaAddr, txDmaLen);
1847 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1848
1849 // If the receive state machine has a pending DMA, let it go first
1850 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1851 rxKick();
1852
1853 txKick();
1854 }
1855
1856 bool
1857 NSGigE::doTxDmaWrite()
1858 {
1859 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1860 txDmaState = dmaWriting;
1861
1862 if (dmaPending() || getState() != Running)
1863 txDmaState = dmaWriteWaiting;
1864 else
1865 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1866 return true;
1867 }
1868
1869 void
1870 NSGigE::txDmaWriteDone()
1871 {
1872 assert(txDmaState == dmaWriting);
1873 txDmaState = dmaIdle;
1874
1875 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1876 txDmaAddr, txDmaLen);
1877 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1878
1879 // If the receive state machine has a pending DMA, let it go first
1880 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1881 rxKick();
1882
1883 txKick();
1884 }
1885
1886 void
1887 NSGigE::txKick()
1888 {
1889 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1890
1891 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1892 NsTxStateStrings[txState], is64bit ? 64 : 32);
1893
1894 Addr link, bufptr;
1895 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1896 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1897
1898 next:
1899 if (clock) {
1900 if (txKickTick > curTick) {
1901 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1902 txKickTick);
1903 goto exit;
1904 }
1905
1906 // Go to the next state machine clock tick.
1907 txKickTick = curTick + cycles(1);
1908 }
1909
1910 switch(txDmaState) {
1911 case dmaReadWaiting:
1912 if (doTxDmaRead())
1913 goto exit;
1914 break;
1915 case dmaWriteWaiting:
1916 if (doTxDmaWrite())
1917 goto exit;
1918 break;
1919 default:
1920 break;
1921 }
1922
1923 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1924 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1925 switch (txState) {
1926 case txIdle:
1927 if (!txEnable) {
1928 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1929 goto exit;
1930 }
1931
1932 if (CTDD) {
1933 txState = txDescRefr;
1934
1935 txDmaAddr = regs.txdp & 0x3fffffff;
1936 txDmaData =
1937 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1938 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1939 txDmaFree = dmaDescFree;
1940
1941 descDmaReads++;
1942 descDmaRdBytes += txDmaLen;
1943
1944 if (doTxDmaRead())
1945 goto exit;
1946
1947 } else {
1948 txState = txDescRead;
1949
1950 txDmaAddr = regs.txdp & 0x3fffffff;
1951 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1952 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1953 txDmaFree = dmaDescFree;
1954
1955 descDmaReads++;
1956 descDmaRdBytes += txDmaLen;
1957
1958 if (doTxDmaRead())
1959 goto exit;
1960 }
1961 break;
1962
1963 case txDescRefr:
1964 if (txDmaState != dmaIdle)
1965 goto exit;
1966
1967 txState = txAdvance;
1968 break;
1969
1970 case txDescRead:
1971 if (txDmaState != dmaIdle)
1972 goto exit;
1973
1974 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1975 regs.txdp & 0x3fffffff);
1976 DPRINTF(EthernetDesc,
1977 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1978 link, bufptr, cmdsts, extsts);
1979
1980 if (cmdsts & CMDSTS_OWN) {
1981 txState = txFifoBlock;
1982 txFragPtr = bufptr;
1983 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1984 } else {
1985 devIntrPost(ISR_TXIDLE);
1986 txState = txIdle;
1987 goto exit;
1988 }
1989 break;
1990
1991 case txFifoBlock:
1992 if (!txPacket) {
1993 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1994 txPacket = new EthPacketData(16384);
1995 txPacketBufPtr = txPacket->data;
1996 }
1997
1998 if (txDescCnt == 0) {
1999 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2000 if (cmdsts & CMDSTS_MORE) {
2001 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2002 txState = txDescWrite;
2003
2004 cmdsts &= ~CMDSTS_OWN;
2005
2006 txDmaAddr = regs.txdp & 0x3fffffff;
2007 txDmaData = &cmdsts;
2008 if (is64bit) {
2009 txDmaAddr += offsetof(ns_desc64, cmdsts);
2010 txDmaLen = sizeof(txDesc64.cmdsts);
2011 } else {
2012 txDmaAddr += offsetof(ns_desc32, cmdsts);
2013 txDmaLen = sizeof(txDesc32.cmdsts);
2014 }
2015 txDmaFree = dmaDescFree;
2016
2017 if (doTxDmaWrite())
2018 goto exit;
2019
2020 } else { /* this packet is totally done */
2021 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2022 /* deal with the the packet that just finished */
2023 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2024 IpPtr ip(txPacket);
2025 if (extsts & EXTSTS_UDPPKT) {
2026 UdpPtr udp(ip);
2027 udp->sum(0);
2028 udp->sum(cksum(udp));
2029 txUdpChecksums++;
2030 } else if (extsts & EXTSTS_TCPPKT) {
2031 TcpPtr tcp(ip);
2032 tcp->sum(0);
2033 tcp->sum(cksum(tcp));
2034 txTcpChecksums++;
2035 }
2036 if (extsts & EXTSTS_IPPKT) {
2037 ip->sum(0);
2038 ip->sum(cksum(ip));
2039 txIpChecksums++;
2040 }
2041 }
2042
2043 txPacket->length = txPacketBufPtr - txPacket->data;
2044 // this is just because the receive can't handle a
2045 // packet bigger want to make sure
2046 if (txPacket->length > 1514)
2047 panic("transmit packet too large, %s > 1514\n",
2048 txPacket->length);
2049
2050 #ifndef NDEBUG
2051 bool success =
2052 #endif
2053 txFifo.push(txPacket);
2054 assert(success);
2055
2056 /*
2057 * this following section is not tqo spec, but
2058 * functionally shouldn't be any different. normally,
2059 * the chip will wait til the transmit has occurred
2060 * before writing back the descriptor because it has
2061 * to wait to see that it was successfully transmitted
2062 * to decide whether to set CMDSTS_OK or not.
2063 * however, in the simulator since it is always
2064 * successfully transmitted, and writing it exactly to
2065 * spec would complicate the code, we just do it here
2066 */
2067
2068 cmdsts &= ~CMDSTS_OWN;
2069 cmdsts |= CMDSTS_OK;
2070
2071 DPRINTF(EthernetDesc,
2072 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2073 cmdsts, extsts);
2074
2075 txDmaFree = dmaDescFree;
2076 txDmaAddr = regs.txdp & 0x3fffffff;
2077 txDmaData = &cmdsts;
2078 if (is64bit) {
2079 txDmaAddr += offsetof(ns_desc64, cmdsts);
2080 txDmaLen =
2081 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2082 } else {
2083 txDmaAddr += offsetof(ns_desc32, cmdsts);
2084 txDmaLen =
2085 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2086 }
2087
2088 descDmaWrites++;
2089 descDmaWrBytes += txDmaLen;
2090
2091 transmit();
2092 txPacket = 0;
2093
2094 if (!txEnable) {
2095 DPRINTF(EthernetSM, "halting TX state machine\n");
2096 txState = txIdle;
2097 goto exit;
2098 } else
2099 txState = txAdvance;
2100
2101 if (doTxDmaWrite())
2102 goto exit;
2103 }
2104 } else {
2105 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2106 if (!txFifo.full()) {
2107 txState = txFragRead;
2108
2109 /*
2110 * The number of bytes transferred is either whatever
2111 * is left in the descriptor (txDescCnt), or if there
2112 * is not enough room in the fifo, just whatever room
2113 * is left in the fifo
2114 */
2115 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2116
2117 txDmaAddr = txFragPtr & 0x3fffffff;
2118 txDmaData = txPacketBufPtr;
2119 txDmaLen = txXferLen;
2120 txDmaFree = dmaDataFree;
2121
2122 if (doTxDmaRead())
2123 goto exit;
2124 } else {
2125 txState = txFifoBlock;
2126 transmit();
2127
2128 goto exit;
2129 }
2130
2131 }
2132 break;
2133
2134 case txFragRead:
2135 if (txDmaState != dmaIdle)
2136 goto exit;
2137
2138 txPacketBufPtr += txXferLen;
2139 txFragPtr += txXferLen;
2140 txDescCnt -= txXferLen;
2141 txFifo.reserve(txXferLen);
2142
2143 txState = txFifoBlock;
2144 break;
2145
2146 case txDescWrite:
2147 if (txDmaState != dmaIdle)
2148 goto exit;
2149
2150 if (cmdsts & CMDSTS_INTR)
2151 devIntrPost(ISR_TXDESC);
2152
2153 if (!txEnable) {
2154 DPRINTF(EthernetSM, "halting TX state machine\n");
2155 txState = txIdle;
2156 goto exit;
2157 } else
2158 txState = txAdvance;
2159 break;
2160
2161 case txAdvance:
2162 if (link == 0) {
2163 devIntrPost(ISR_TXIDLE);
2164 txState = txIdle;
2165 goto exit;
2166 } else {
2167 if (txDmaState != dmaIdle)
2168 goto exit;
2169 txState = txDescRead;
2170 regs.txdp = link;
2171 CTDD = false;
2172
2173 txDmaAddr = link & 0x3fffffff;
2174 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2175 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2176 txDmaFree = dmaDescFree;
2177
2178 if (doTxDmaRead())
2179 goto exit;
2180 }
2181 break;
2182
2183 default:
2184 panic("invalid state");
2185 }
2186
2187 DPRINTF(EthernetSM, "entering next txState=%s\n",
2188 NsTxStateStrings[txState]);
2189 goto next;
2190
2191 exit:
2192 /**
2193 * @todo do we want to schedule a future kick?
2194 */
2195 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2196 NsTxStateStrings[txState]);
2197
2198 if (clock && !txKickEvent.scheduled())
2199 txKickEvent.schedule(txKickTick);
2200 }
2201
2202 /**
2203 * Advance the EEPROM state machine
2204 * Called on rising edge of EEPROM clock bit in MEAR
2205 */
/**
 * Advance the EEPROM bit-serial state machine by one clock edge.
 * Input bits arrive via MEAR_EEDI; output bits are driven on
 * MEAR_EEDO, MSB first.  Only the READ opcode is modeled, and only
 * the three perfect-match address words are backed by data.
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift in one opcode bit per clock, MSB first.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in one address bit per clock, MSB first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Load the 16-bit word for the requested address from the
            // perfect-match MAC address stored in the ROM.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        // Shift out one data bit per clock, MSB first.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2302
2303 void
2304 NSGigE::transferDone()
2305 {
2306 if (txFifo.empty()) {
2307 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2308 return;
2309 }
2310
2311 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2312
2313 if (txEvent.scheduled())
2314 txEvent.reschedule(curTick + cycles(1));
2315 else
2316 txEvent.schedule(curTick + cycles(1));
2317 }
2318
2319 bool
2320 NSGigE::rxFilter(const EthPacketPtr &packet)
2321 {
2322 EthPtr eth = packet;
2323 bool drop = true;
2324 string type;
2325
2326 const EthAddr &dst = eth->dst();
2327 if (dst.unicast()) {
2328 // If we're accepting all unicast addresses
2329 if (acceptUnicast)
2330 drop = false;
2331
2332 // If we make a perfect match
2333 if (acceptPerfect && dst == rom.perfectMatch)
2334 drop = false;
2335
2336 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2337 drop = false;
2338
2339 } else if (dst.broadcast()) {
2340 // if we're accepting broadcasts
2341 if (acceptBroadcast)
2342 drop = false;
2343
2344 } else if (dst.multicast()) {
2345 // if we're accepting all multicasts
2346 if (acceptMulticast)
2347 drop = false;
2348
2349 // Multicast hashing faked - all packets accepted
2350 if (multicastHashEnable)
2351 drop = false;
2352 }
2353
2354 if (drop) {
2355 DPRINTF(Ethernet, "rxFilter drop\n");
2356 DDUMP(EthernetData, packet->data, packet->length);
2357 }
2358
2359 return drop;
2360 }
2361
2362 bool
2363 NSGigE::recvPacket(EthPacketPtr packet)
2364 {
2365 rxBytes += packet->length;
2366 rxPackets++;
2367
2368 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2369 rxFifo.avail());
2370
2371 if (!rxEnable) {
2372 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2373 return true;
2374 }
2375
2376 if (!rxFilterEnable) {
2377 DPRINTF(Ethernet,
2378 "receive packet filtering disabled . . . packet dropped\n");
2379 return true;
2380 }
2381
2382 if (rxFilter(packet)) {
2383 DPRINTF(Ethernet, "packet filtered...dropped\n");
2384 return true;
2385 }
2386
2387 if (rxFifo.avail() < packet->length) {
2388 #if TRACING_ON
2389 IpPtr ip(packet);
2390 TcpPtr tcp(ip);
2391 if (ip) {
2392 DPRINTF(Ethernet,
2393 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2394 ip->id());
2395 if (tcp) {
2396 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2397 }
2398 }
2399 #endif
2400 droppedPackets++;
2401 devIntrPost(ISR_RXORN);
2402 return false;
2403 }
2404
2405 rxFifo.push(packet);
2406
2407 rxKick();
2408 return true;
2409 }
2410
2411
/**
 * Resume simulation after a drain: kick both state machines so any
 * DMA left in a waiting state makes forward progress immediately.
 */
void
NSGigE::resume()
{
    SimObject::resume();

    // During drain we could have left the state machines in a waiting state and
    // they wouldn't get out until some other event occurred to kick them.
    // This way they'll get out immediately
    txKick();
    rxKick();
}
2423
2424
2425 //=====================================================================
2426 //
2427 //
/**
 * Write the complete device state to a checkpoint stream.
 *
 * Must be kept in exact correspondence with unserialize(): every
 * scalar/array written here is read back by name there.  Scheduled
 * events are stored as relative/absolute ticks so they can be
 * recreated on restore.
 *
 * @param os checkpoint output stream
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // EEPROM-backed receive-filter ROM contents
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are optional; a bool flag records presence and
    // the buffer pointers are saved as offsets into the packet data.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enums are widened to int for checkpoint portability.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // 0 means "no interrupt event scheduled".
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2599
/**
 * Restore the device's state from a checkpoint.
 *
 * NOTE(review): the read order below must exactly mirror the write
 * order in NSGigE::serialize(); reordering any UNSERIALIZE_* call
 * silently corrupts the restored state.
 *
 * @param cp      Checkpoint to read values from.
 * @param section Checkpoint section name for this device.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    // Restore the device register file.
    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    // Receive-filter perfect-match address and multicast filter hash table.
    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // An in-flight tx packet was checkpointed only if one existed; the
    // buffer pointer was saved as an offset into the packet data and is
    // rebuilt here against the freshly allocated packet.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    // Same scheme for an in-flight rx packet.
    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new EthPacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;   // redundant (already cleared above); kept for symmetry

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // Enum members were checkpointed as plain ints; read into a
    // temporary and cast back to the enum type.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A non-zero kick tick means the tx state machine had a pending
    // kick; reschedule it.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was saved relative to the checkpoint-time curTick
    // (0 means no transmit was pending), so rebase onto the new curTick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    // A saved tick of 0 means no interrupt event was pending at
    // checkpoint time; otherwise recreate and reschedule the event.
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }
}
2780
// Configuration parameters for the NSGigEInt (ethernet interface) SimObject.
// NOTE(review): declaration order must match the INIT_PARAM order in the
// corresponding BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt) block.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // peer interface at the other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // owning NSGigE ethernet device

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2787
// Parameter descriptions and defaults for NSGigEInt.
// NOTE(review): entries must stay in the same order as the DECLARE block.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2794
2795 CREATE_SIM_OBJECT(NSGigEInt)
2796 {
2797 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2798
2799 EtherInt *p = (EtherInt *)peer;
2800 if (p) {
2801 dev_int->setPeer(p);
2802 p->setPeer(dev_int);
2803 }
2804
2805 return dev_int;
2806 }
2807
2808 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2809
2810
// Configuration parameters for the NSGigE (NS DP83820) device SimObject.
// NOTE(review): declaration order must match the INIT_PARAM order in the
// corresponding BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE) block.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Generic PCI device identity and latency parameters
    SimObjectParam<System *> system;
    SimObjectParam<Platform *> platform;
    SimObjectParam<PciConfigData *> configdata;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<Tick> pio_latency;
    Param<Tick> config_latency;

    // Device clock and DMA timing model parameters
    Param<Tick> clock;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> intr_delay;

    // Packet path delays and FIFO sizing
    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> tx_fifo_size;

    // Receive filtering and miscellaneous device options
    Param<bool> rx_filter;
    Param<string> hardware_address;
    Param<bool> rx_thread;
    Param<bool> tx_thread;
    Param<bool> rss;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2844
// Parameter descriptions and defaults for NSGigE.
// NOTE(review): entries must stay in the same order as the DECLARE block.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(system, "System pointer"),
    INIT_PARAM(platform, "Platform pointer"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(pci_bus, "PCI bus ID"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM(config_latency, "Number of cycles for a config read or write"),
    INIT_PARAM(clock, "State machine cycle time"),

    INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
    INIT_PARAM(dma_data_free, "DMA of Data is free"),
    INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
    INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
    INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
    INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
    INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),

    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
    INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),

    INIT_PARAM(rx_filter, "Enable Receive Filter"),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM(rx_thread, ""),
    INIT_PARAM(tx_thread, ""),
    INIT_PARAM(rss, "")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2878
2879
2880 CREATE_SIM_OBJECT(NSGigE)
2881 {
2882 NSGigE::Params *params = new NSGigE::Params;
2883
2884 params->name = getInstanceName();
2885 params->platform = platform;
2886 params->system = system;
2887 params->configData = configdata;
2888 params->busNum = pci_bus;
2889 params->deviceNum = pci_dev;
2890 params->functionNum = pci_func;
2891 params->pio_delay = pio_latency;
2892 params->config_delay = config_latency;
2893
2894 params->clock = clock;
2895 params->dma_desc_free = dma_desc_free;
2896 params->dma_data_free = dma_data_free;
2897 params->dma_read_delay = dma_read_delay;
2898 params->dma_write_delay = dma_write_delay;
2899 params->dma_read_factor = dma_read_factor;
2900 params->dma_write_factor = dma_write_factor;
2901 params->dma_no_allocate = dma_no_allocate;
2902 params->pio_delay = pio_latency;
2903 params->intr_delay = intr_delay;
2904
2905 params->rx_delay = rx_delay;
2906 params->tx_delay = tx_delay;
2907 params->rx_fifo_size = rx_fifo_size;
2908 params->tx_fifo_size = tx_fifo_size;
2909
2910 params->rx_filter = rx_filter;
2911 params->eaddr = hardware_address;
2912 params->rx_thread = rx_thread;
2913 params->tx_thread = tx_thread;
2914 params->rss = rss;
2915
2916 return new NSGigE(params);
2917 }
2918
2919 REGISTER_SIM_OBJECT("NSGigE", NSGigE)