src/dev/ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 Ethernet controller. Does not support priority queueing.
35 */
36 #include <deque>
37 #include <string>
38
39 #include "base/inet.hh"
40 #include "cpu/thread_context.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/packet.hh"
45 #include "mem/packet_access.hh"
46 #include "sim/builder.hh"
47 #include "sim/debug.hh"
48 #include "sim/host.hh"
49 #include "sim/stats.hh"
50 #include "sim/system.hh"
51
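// Human-readable names for the receive/transmit state machines and the
// DMA engine states, used in DPRINTF trace output.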
52 const char *NsRxStateStrings[] =
53 {
54 "rxIdle",
55 "rxDescRefr",
56 "rxDescRead",
57 "rxFifoBlock",
58 "rxFragWrite",
59 "rxDescWrite",
60 "rxAdvance"
61 };
62
63 const char *NsTxStateStrings[] =
64 {
65 "txIdle",
66 "txDescRefr",
67 "txDescRead",
68 "txFifoBlock",
69 "txFragRead",
70 "txDescWrite",
71 "txAdvance"
72 };
73
74 const char *NsDmaState[] =
75 {
76 "dmaIdle",
77 "dmaReading",
78 "dmaWriting",
79 "dmaReadWaiting",
80 "dmaWriteWaiting"
81 };
82
83 using namespace std;
84 using namespace Net;
85 using namespace TheISA;
86
87 ///////////////////////////////////////////////////////////////////////
88 //
89 // NSGigE PCI Device
90 //
91 NSGigE::NSGigE(Params *p)
92 : PciDev(p), ioEnable(false),
93 txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
94 txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
95 txXferLen(0), rxXferLen(0), rxDmaFree(false), txDmaFree(false),
96 clock(p->clock),
97 txState(txIdle), txEnable(false), CTDD(false), txHalt(false),
98 txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
99 rxEnable(false), CRDD(false), rxPktBytes(0), rxHalt(false),
100 rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
101 eepromState(eepromStart), eepromClk(false), eepromBitsToRx(0),
102 eepromOpcode(0), eepromAddress(0), eepromData(0),
103 dmaReadDelay(p->dma_read_delay), dmaWriteDelay(p->dma_write_delay),
104 dmaReadFactor(p->dma_read_factor), dmaWriteFactor(p->dma_write_factor),
105 rxDmaData(NULL), rxDmaAddr(0), rxDmaLen(0),
106 txDmaData(NULL), txDmaAddr(0), txDmaLen(0),
107 rxDmaReadEvent(this), rxDmaWriteEvent(this),
108 txDmaReadEvent(this), txDmaWriteEvent(this),
109 dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
110 txDelay(p->tx_delay), rxDelay(p->rx_delay),
111 rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
112 txEvent(this), rxFilterEnable(p->rx_filter),
113 acceptBroadcast(false), acceptMulticast(false), acceptUnicast(false),
114 acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
115 intrDelay(p->intr_delay), intrTick(0), cpuPendingIntr(false),
116 intrEvent(0), interface(0)
117 {
118
119
120 regsReset();
121 memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
122
123 memset(&rxDesc32, 0, sizeof(rxDesc32));
124 memset(&txDesc32, 0, sizeof(txDesc32));
125 memset(&rxDesc64, 0, sizeof(rxDesc64));
126 memset(&txDesc64, 0, sizeof(txDesc64));
127 }
128
129 NSGigE::~NSGigE()
130 {}
131
132 void
133 NSGigE::regStats()
134 {
135 txBytes
136 .name(name() + ".txBytes")
137 .desc("Bytes Transmitted")
138 .prereq(txBytes)
139 ;
140
141 rxBytes
142 .name(name() + ".rxBytes")
143 .desc("Bytes Received")
144 .prereq(rxBytes)
145 ;
146
147 txPackets
148 .name(name() + ".txPackets")
149 .desc("Number of Packets Transmitted")
150 .prereq(txBytes)
151 ;
152
153 rxPackets
154 .name(name() + ".rxPackets")
155 .desc("Number of Packets Received")
156 .prereq(rxBytes)
157 ;
158
159 txIpChecksums
160 .name(name() + ".txIpChecksums")
161 .desc("Number of tx IP Checksums done by device")
162 .precision(0)
163 .prereq(txBytes)
164 ;
165
166 rxIpChecksums
167 .name(name() + ".rxIpChecksums")
168 .desc("Number of rx IP Checksums done by device")
169 .precision(0)
170 .prereq(rxBytes)
171 ;
172
173 txTcpChecksums
174 .name(name() + ".txTcpChecksums")
175 .desc("Number of tx TCP Checksums done by device")
176 .precision(0)
177 .prereq(txBytes)
178 ;
179
180 rxTcpChecksums
181 .name(name() + ".rxTcpChecksums")
182 .desc("Number of rx TCP Checksums done by device")
183 .precision(0)
184 .prereq(rxBytes)
185 ;
186
187 txUdpChecksums
188 .name(name() + ".txUdpChecksums")
189 .desc("Number of tx UDP Checksums done by device")
190 .precision(0)
191 .prereq(txBytes)
192 ;
193
194 rxUdpChecksums
195 .name(name() + ".rxUdpChecksums")
196 .desc("Number of rx UDP Checksums done by device")
197 .precision(0)
198 .prereq(rxBytes)
199 ;
200
201 descDmaReads
202 .name(name() + ".descDMAReads")
203 .desc("Number of descriptors the device read w/ DMA")
204 .precision(0)
205 ;
206
207 descDmaWrites
208 .name(name() + ".descDMAWrites")
209 .desc("Number of descriptors the device wrote w/ DMA")
210 .precision(0)
211 ;
212
213 descDmaRdBytes
214 .name(name() + ".descDmaReadBytes")
215 .desc("number of descriptor bytes read w/ DMA")
216 .precision(0)
217 ;
218
219 descDmaWrBytes
220 .name(name() + ".descDmaWriteBytes")
221 .desc("number of descriptor bytes written w/ DMA")
222 .precision(0)
223 ;
224
225 txBandwidth
226 .name(name() + ".txBandwidth")
227 .desc("Transmit Bandwidth (bits/s)")
228 .precision(0)
229 .prereq(txBytes)
230 ;
231
232 rxBandwidth
233 .name(name() + ".rxBandwidth")
234 .desc("Receive Bandwidth (bits/s)")
235 .precision(0)
236 .prereq(rxBytes)
237 ;
238
239 totBandwidth
240 .name(name() + ".totBandwidth")
241 .desc("Total Bandwidth (bits/s)")
242 .precision(0)
243 .prereq(totBytes)
244 ;
245
246 totPackets
247 .name(name() + ".totPackets")
248 .desc("Total Packets")
249 .precision(0)
250 .prereq(totBytes)
251 ;
252
253 totBytes
254 .name(name() + ".totBytes")
255 .desc("Total Bytes")
256 .precision(0)
257 .prereq(totBytes)
258 ;
259
260 totPacketRate
261 .name(name() + ".totPPS")
262 .desc("Total Transmission Rate (packets/s)")
263 .precision(0)
264 .prereq(totBytes)
265 ;
266
267 txPacketRate
268 .name(name() + ".txPPS")
269 .desc("Packet Transmission Rate (packets/s)")
270 .precision(0)
271 .prereq(txBytes)
272 ;
273
274 rxPacketRate
275 .name(name() + ".rxPPS")
276 .desc("Packet Reception Rate (packets/s)")
277 .precision(0)
278 .prereq(rxBytes)
279 ;
280
281 postedSwi
282 .name(name() + ".postedSwi")
283 .desc("number of software interrupts posted to CPU")
284 .precision(0)
285 ;
286
287 totalSwi
288 .name(name() + ".totalSwi")
289 .desc("total number of Swi written to ISR")
290 .precision(0)
291 ;
292
293 coalescedSwi
294 .name(name() + ".coalescedSwi")
295 .desc("average number of Swi's coalesced into each post")
296 .precision(0)
297 ;
298
299 postedRxIdle
300 .name(name() + ".postedRxIdle")
301 .desc("number of rxIdle interrupts posted to CPU")
302 .precision(0)
303 ;
304
305 totalRxIdle
306 .name(name() + ".totalRxIdle")
307 .desc("total number of RxIdle written to ISR")
308 .precision(0)
309 ;
310
311 coalescedRxIdle
312 .name(name() + ".coalescedRxIdle")
313 .desc("average number of RxIdle's coalesced into each post")
314 .precision(0)
315 ;
316
317 postedRxOk
318 .name(name() + ".postedRxOk")
319 .desc("number of RxOk interrupts posted to CPU")
320 .precision(0)
321 ;
322
323 totalRxOk
324 .name(name() + ".totalRxOk")
325 .desc("total number of RxOk written to ISR")
326 .precision(0)
327 ;
328
329 coalescedRxOk
330 .name(name() + ".coalescedRxOk")
331 .desc("average number of RxOk's coalesced into each post")
332 .precision(0)
333 ;
334
335 postedRxDesc
336 .name(name() + ".postedRxDesc")
337 .desc("number of RxDesc interrupts posted to CPU")
338 .precision(0)
339 ;
340
341 totalRxDesc
342 .name(name() + ".totalRxDesc")
343 .desc("total number of RxDesc written to ISR")
344 .precision(0)
345 ;
346
347 coalescedRxDesc
348 .name(name() + ".coalescedRxDesc")
349 .desc("average number of RxDesc's coalesced into each post")
350 .precision(0)
351 ;
352
353 postedTxOk
354 .name(name() + ".postedTxOk")
355 .desc("number of TxOk interrupts posted to CPU")
356 .precision(0)
357 ;
358
359 totalTxOk
360 .name(name() + ".totalTxOk")
361 .desc("total number of TxOk written to ISR")
362 .precision(0)
363 ;
364
365 coalescedTxOk
366 .name(name() + ".coalescedTxOk")
367 .desc("average number of TxOk's coalesced into each post")
368 .precision(0)
369 ;
370
371 postedTxIdle
372 .name(name() + ".postedTxIdle")
373 .desc("number of TxIdle interrupts posted to CPU")
374 .precision(0)
375 ;
376
377 totalTxIdle
378 .name(name() + ".totalTxIdle")
379 .desc("total number of TxIdle written to ISR")
380 .precision(0)
381 ;
382
383 coalescedTxIdle
384 .name(name() + ".coalescedTxIdle")
385 .desc("average number of TxIdle's coalesced into each post")
386 .precision(0)
387 ;
388
389 postedTxDesc
390 .name(name() + ".postedTxDesc")
391 .desc("number of TxDesc interrupts posted to CPU")
392 .precision(0)
393 ;
394
395 totalTxDesc
396 .name(name() + ".totalTxDesc")
397 .desc("total number of TxDesc written to ISR")
398 .precision(0)
399 ;
400
401 coalescedTxDesc
402 .name(name() + ".coalescedTxDesc")
403 .desc("average number of TxDesc's coalesced into each post")
404 .precision(0)
405 ;
406
407 postedRxOrn
408 .name(name() + ".postedRxOrn")
409 .desc("number of RxOrn posted to CPU")
410 .precision(0)
411 ;
412
413 totalRxOrn
414 .name(name() + ".totalRxOrn")
415 .desc("total number of RxOrn written to ISR")
416 .precision(0)
417 ;
418
419 coalescedRxOrn
420 .name(name() + ".coalescedRxOrn")
421 .desc("average number of RxOrn's coalesced into each post")
422 .precision(0)
423 ;
424
425 coalescedTotal
426 .name(name() + ".coalescedTotal")
427 .desc("average number of interrupts coalesced into each post")
428 .precision(0)
429 ;
430
431 postedInterrupts
432 .name(name() + ".postedInterrupts")
433 .desc("number of posts to CPU")
434 .precision(0)
435 ;
436
437 droppedPackets
438 .name(name() + ".droppedPackets")
439 .desc("number of packets dropped")
440 .precision(0)
441 ;
442
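// Formula stats: these are evaluated at stats-dump time, deriving
// averages and rates from the counters registered above.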
443 coalescedSwi = totalSwi / postedInterrupts;
444 coalescedRxIdle = totalRxIdle / postedInterrupts;
445 coalescedRxOk = totalRxOk / postedInterrupts;
446 coalescedRxDesc = totalRxDesc / postedInterrupts;
447 coalescedTxOk = totalTxOk / postedInterrupts;
448 coalescedTxIdle = totalTxIdle / postedInterrupts;
449 coalescedTxDesc = totalTxDesc / postedInterrupts;
450 coalescedRxOrn = totalRxOrn / postedInterrupts;
451
452 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
453 totalTxOk + totalTxIdle + totalTxDesc +
454 totalRxOrn) / postedInterrupts;
455
456 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
457 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
458 totBandwidth = txBandwidth + rxBandwidth;
459 totBytes = txBytes + rxBytes;
460 totPackets = txPackets + rxPackets;
461
462 txPacketRate = txPackets / simSeconds;
463 rxPacketRate = rxPackets / simSeconds;
464 }
465
466
467 /**
468 * This is to write to the PCI general configuration registers
469 */
470 Tick
471 NSGigE::writeConfig(PacketPtr pkt)
472 {
473 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
474 if (offset < PCI_DEVICE_SPECIFIC)
475 PciDev::writeConfig(pkt);
476 else
477 panic("Device specific PCI config space not implemented!\n");
478
479 switch (offset) {
480 // The device seems to work fine without all these PCI settings, but
481 // the I/O enable bit is tracked so that an assertion will fail if we
482 // ever need to implement it properly
483 case PCI_COMMAND:
484 if (config.data[offset] & PCI_CMD_IOSE)
485 ioEnable = true;
486 else
487 ioEnable = false;
488 break;
489 }
490
491 return configDelay;
492 }
493
494 /**
495 * This reads the device registers, which are detailed in the NS83820
496 * spec sheet
497 */
498 Tick
499 NSGigE::read(PacketPtr pkt)
500 {
501 assert(ioEnable);
502
503 pkt->allocate();
504
505 //The mask is to give you only the offset into the device register file
506 Addr daddr = pkt->getAddr() & 0xfff;
507 DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
508 daddr, pkt->getAddr(), pkt->getSize());
509
510
511 // there are some reserved registers, you can see ns_gige_reg.h and
512 // the spec sheet for details
513 if (daddr > LAST && daddr <= RESERVED) {
514 panic("Accessing reserved register");
515 } else if (daddr > RESERVED && daddr <= 0x3FC) {
516 return readConfig(pkt);
517 } else if (daddr >= MIB_START && daddr <= MIB_END) {
518 // We don't implement all the MIBs; they are just hardware
519 // statistics counters, and the kernel hopefully doesn't
520 // actually depend upon their values
521 pkt->set<uint32_t>(0);
522 pkt->makeAtomicResponse();
523 return pioDelay;
524 } else if (daddr > 0x3FC)
525 panic("Something is messed up!\n");
526
527 assert(pkt->getSize() == sizeof(uint32_t));
528 uint32_t &reg = *pkt->getPtr<uint32_t>();
529 uint16_t rfaddr;
530
531 switch (daddr) {
532 case CR:
533 reg = regs.command;
534 //these are supposed to be cleared on a read
535 reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
536 break;
537
538 case CFGR:
539 reg = regs.config;
540 break;
541
542 case MEAR:
543 reg = regs.mear;
544 break;
545
546 case PTSCR:
547 reg = regs.ptscr;
548 break;
549
550 case ISR:
551 reg = regs.isr;
552 devIntrClear(ISR_ALL);
553 break;
554
555 case IMR:
556 reg = regs.imr;
557 break;
558
559 case IER:
560 reg = regs.ier;
561 break;
562
563 case IHR:
564 reg = regs.ihr;
565 break;
566
567 case TXDP:
568 reg = regs.txdp;
569 break;
570
571 case TXDP_HI:
572 reg = regs.txdp_hi;
573 break;
574
575 case TX_CFG:
576 reg = regs.txcfg;
577 break;
578
579 case GPIOR:
580 reg = regs.gpior;
581 break;
582
583 case RXDP:
584 reg = regs.rxdp;
585 break;
586
587 case RXDP_HI:
588 reg = regs.rxdp_hi;
589 break;
590
591 case RX_CFG:
592 reg = regs.rxcfg;
593 break;
594
595 case PQCR:
596 reg = regs.pqcr;
597 break;
598
599 case WCSR:
600 reg = regs.wcsr;
601 break;
602
603 case PCR:
604 reg = regs.pcr;
605 break;
606
607 // See the spec sheet for how RFCR and RFDR work.
608 // Basically, software first writes RFCR to select which
609 // receive-filter entry to access, then reads or writes RFDR,
610 // and the device responds based on what was previously
611 // written to RFCR
612 case RFCR:
613 reg = regs.rfcr;
614 break;
615
616 case RFDR:
617 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
618 switch (rfaddr) {
619 // Read from perfect match ROM octets
620 case 0x000:
621 reg = rom.perfectMatch[1];
622 reg = reg << 8;
623 reg += rom.perfectMatch[0];
624 break;
625 case 0x002:
626 reg = rom.perfectMatch[3] << 8;
627 reg += rom.perfectMatch[2];
628 break;
629 case 0x004:
630 reg = rom.perfectMatch[5] << 8;
631 reg += rom.perfectMatch[4];
632 break;
633 default:
634 // Read filter hash table
635 if (rfaddr >= FHASH_ADDR &&
636 rfaddr < FHASH_ADDR + FHASH_SIZE) {
637
638 // Only word-aligned reads supported
639 if (rfaddr % 2)
640 panic("unaligned read from filter hash table!");
641
642 reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
643 reg += rom.filterHash[rfaddr - FHASH_ADDR];
644 break;
645 }
646
647 panic("reading RFDR for something other than pattern"
648 " matching or hashing! %#x\n", rfaddr);
649 }
650 break;
651
652 case SRR:
653 reg = regs.srr;
654 break;
655
656 case MIBC:
657 reg = regs.mibc;
658 reg &= ~(MIBC_MIBS | MIBC_ACLR);
659 break;
660
661 case VRCR:
662 reg = regs.vrcr;
663 break;
664
665 case VTCR:
666 reg = regs.vtcr;
667 break;
668
669 case VDR:
670 reg = regs.vdr;
671 break;
672
673 case CCSR:
674 reg = regs.ccsr;
675 break;
676
677 case TBICR:
678 reg = regs.tbicr;
679 break;
680
681 case TBISR:
682 reg = regs.tbisr;
683 break;
684
685 case TANAR:
686 reg = regs.tanar;
687 break;
688
689 case TANLPAR:
690 reg = regs.tanlpar;
691 break;
692
693 case TANER:
694 reg = regs.taner;
695 break;
696
697 case TESR:
698 reg = regs.tesr;
699 break;
700
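// M5REG is a simulator-specific register (it does not exist on the real
// DP83820); it exposes gem5 configuration flags to the guest driver.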
701 case M5REG:
702 reg = 0;
703 if (params()->rx_thread)
704 reg |= M5REG_RX_THREAD;
705 if (params()->tx_thread)
706 reg |= M5REG_TX_THREAD;
707 if (params()->rss)
708 reg |= M5REG_RSS;
709 break;
710
711 default:
712 panic("reading unimplemented register: addr=%#x", daddr);
713 }
714
715 DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
716 daddr, reg, reg);
717
718 pkt->makeAtomicResponse();
719 return pioDelay;
720 }
721
722 Tick
723 NSGigE::write(PacketPtr pkt)
724 {
725 assert(ioEnable);
726
727 Addr daddr = pkt->getAddr() & 0xfff;
728 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
729 daddr, pkt->getAddr(), pkt->getSize());
730
731 if (daddr > LAST && daddr <= RESERVED) {
732 panic("Accessing reserved register");
733 } else if (daddr > RESERVED && daddr <= 0x3FC) {
734 return writeConfig(pkt);
735 } else if (daddr > 0x3FC)
736 panic("Something is messed up!\n");
737
738 if (pkt->getSize() == sizeof(uint32_t)) {
739 uint32_t reg = pkt->get<uint32_t>();
740 uint16_t rfaddr;
741
742 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
743
744 switch (daddr) {
745 case CR:
746 regs.command = reg;
747 if (reg & CR_TXD) {
748 txEnable = false;
749 } else if (reg & CR_TXE) {
750 txEnable = true;
751
752 // the kernel is enabling the transmit machine
753 if (txState == txIdle)
754 txKick();
755 }
756
757 if (reg & CR_RXD) {
758 rxEnable = false;
759 } else if (reg & CR_RXE) {
760 rxEnable = true;
761
762 if (rxState == rxIdle)
763 rxKick();
764 }
765
766 if (reg & CR_TXR)
767 txReset();
768
769 if (reg & CR_RXR)
770 rxReset();
771
772 if (reg & CR_SWI)
773 devIntrPost(ISR_SWI);
774
775 if (reg & CR_RST) {
776 txReset();
777 rxReset();
778
779 regsReset();
780 }
781 break;
782
783 case CFGR:
784 if (reg & CFGR_LNKSTS ||
785 reg & CFGR_SPDSTS ||
786 reg & CFGR_DUPSTS ||
787 reg & CFGR_RESERVED ||
788 reg & CFGR_T64ADDR ||
789 reg & CFGR_PCI64_DET)
790
791 // First clear all writable bits
792 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
793 CFGR_RESERVED | CFGR_T64ADDR |
794 CFGR_PCI64_DET;
795 // Now set the appropriate writable bits
796 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
797 CFGR_RESERVED | CFGR_T64ADDR |
798 CFGR_PCI64_DET);
799
800 // All these empty checks are here because the kernel does not appear
801 // to need these bits implemented. If a problem relating to one of
802 // them shows up, the functionality may need to be added.
803 if (reg & CFGR_TBI_EN) ;
804 if (reg & CFGR_MODE_1000) ;
805
806 if (reg & CFGR_AUTO_1000)
807 panic("CFGR_AUTO_1000 not implemented!\n");
808
809 if (reg & CFGR_PINT_DUPSTS ||
810 reg & CFGR_PINT_LNKSTS ||
811 reg & CFGR_PINT_SPDSTS)
812 ;
813
814 if (reg & CFGR_TMRTEST) ;
815 if (reg & CFGR_MRM_DIS) ;
816 if (reg & CFGR_MWI_DIS) ;
817
818 if (reg & CFGR_T64ADDR) ;
819 // panic("CFGR_T64ADDR is read only register!\n");
820
821 if (reg & CFGR_PCI64_DET)
822 panic("CFGR_PCI64_DET is read only register!\n");
823
824 if (reg & CFGR_DATA64_EN) ;
825 if (reg & CFGR_M64ADDR) ;
826 if (reg & CFGR_PHY_RST) ;
827 if (reg & CFGR_PHY_DIS) ;
828
829 if (reg & CFGR_EXTSTS_EN)
830 extstsEnable = true;
831 else
832 extstsEnable = false;
833
834 if (reg & CFGR_REQALG) ;
835 if (reg & CFGR_SB) ;
836 if (reg & CFGR_POW) ;
837 if (reg & CFGR_EXD) ;
838 if (reg & CFGR_PESEL) ;
839 if (reg & CFGR_BROM_DIS) ;
840 if (reg & CFGR_EXT_125) ;
841 if (reg & CFGR_BEM) ;
842 break;
843
844 case MEAR:
845 // Clear writable bits
846 regs.mear &= MEAR_EEDO;
847 // Set appropriate writable bits
848 regs.mear |= reg & ~MEAR_EEDO;
849
850 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
851 // even though it could get it through RFDR
852 if (reg & MEAR_EESEL) {
853 // Rising edge of clock
854 if (reg & MEAR_EECLK && !eepromClk)
855 eepromKick();
856 }
857 else {
858 eepromState = eepromStart;
859 regs.mear &= ~MEAR_EEDI;
860 }
861
862 eepromClk = reg & MEAR_EECLK;
863
864 // since phy is completely faked, MEAR_MD* don't matter
865 if (reg & MEAR_MDIO) ;
866 if (reg & MEAR_MDDIR) ;
867 if (reg & MEAR_MDC) ;
868 break;
869
870 case PTSCR:
871 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
872 // these control BISTs for various parts of the chip - we
873 // don't care about them, so just fake that each BIST is done
874 if (reg & PTSCR_RBIST_EN)
875 regs.ptscr |= PTSCR_RBIST_DONE;
876 if (reg & PTSCR_EEBIST_EN)
877 regs.ptscr &= ~PTSCR_EEBIST_EN;
878 if (reg & PTSCR_EELOAD_EN)
879 regs.ptscr &= ~PTSCR_EELOAD_EN;
880 break;
881
882 case ISR: /* writing to the ISR has no effect */
883 panic("ISR is a read only register!\n");
884
885 case IMR:
886 regs.imr = reg;
887 devIntrChangeMask();
888 break;
889
890 case IER:
891 regs.ier = reg;
892 break;
893
894 case IHR:
895 regs.ihr = reg;
896 /* not going to implement real interrupt holdoff */
897 break;
898
899 case TXDP:
900 regs.txdp = (reg & 0xFFFFFFFC);
901 assert(txState == txIdle);
902 CTDD = false;
903 break;
904
905 case TXDP_HI:
906 regs.txdp_hi = reg;
907 break;
908
909 case TX_CFG:
910 regs.txcfg = reg;
911 #if 0
912 if (reg & TX_CFG_CSI) ;
913 if (reg & TX_CFG_HBI) ;
914 if (reg & TX_CFG_MLB) ;
915 if (reg & TX_CFG_ATP) ;
916 if (reg & TX_CFG_ECRETRY) {
917 /*
918 * this could easily be implemented, but considering
919 * the network is just a fake pipe, wouldn't make
920 * sense to do this
921 */
922 }
923
924 if (reg & TX_CFG_BRST_DIS) ;
925 #endif
926
927 #if 0
928 /* we handle our own DMA, ignore the kernel's exhortations */
929 if (reg & TX_CFG_MXDMA) ;
930 #endif
931
932 // also, we currently don't care about fill/drain
933 // thresholds though this may change in the future with
934 // more realistic networks or a driver which changes it
935 // according to feedback
936
937 break;
938
939 case GPIOR:
940 // Only write writable bits
941 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
942 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
943 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
944 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
945 /* these just control general purpose i/o pins, don't matter */
946 break;
947
948 case RXDP:
949 regs.rxdp = reg;
950 CRDD = false;
951 break;
952
953 case RXDP_HI:
954 regs.rxdp_hi = reg;
955 break;
956
957 case RX_CFG:
958 regs.rxcfg = reg;
959 #if 0
960 if (reg & RX_CFG_AEP) ;
961 if (reg & RX_CFG_ARP) ;
962 if (reg & RX_CFG_STRIPCRC) ;
963 if (reg & RX_CFG_RX_RD) ;
964 if (reg & RX_CFG_ALP) ;
965 if (reg & RX_CFG_AIRL) ;
966
967 /* we handle our own DMA, ignore what kernel says about it */
968 if (reg & RX_CFG_MXDMA) ;
969
970 //also, we currently don't care about fill/drain thresholds
971 //though this may change in the future with more realistic
972 //networks or a driver which changes it according to feedback
973 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
974 #endif
975 break;
976
977 case PQCR:
978 /* there is no priority queueing used in the linux 2.6 driver */
979 regs.pqcr = reg;
980 break;
981
982 case WCSR:
983 /* not going to implement wake on LAN */
984 regs.wcsr = reg;
985 break;
986
987 case PCR:
988 /* not going to implement pause control */
989 regs.pcr = reg;
990 break;
991
992 case RFCR:
993 regs.rfcr = reg;
994
995 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
996 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
997 acceptMulticast = (reg & RFCR_AAM) ? true : false;
998 acceptUnicast = (reg & RFCR_AAU) ? true : false;
999 acceptPerfect = (reg & RFCR_APM) ? true : false;
1000 acceptArp = (reg & RFCR_AARP) ? true : false;
1001 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1002
1003 #if 0
1004 if (reg & RFCR_APAT)
1005 panic("RFCR_APAT not implemented!\n");
1006 #endif
1007 if (reg & RFCR_UHEN)
1008 panic("Unicast hash filtering not used by drivers!\n");
1009
1010 if (reg & RFCR_ULM)
1011 panic("RFCR_ULM not implemented!\n");
1012
1013 break;
1014
1015 case RFDR:
1016 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1017 switch (rfaddr) {
1018 case 0x000:
1019 rom.perfectMatch[0] = (uint8_t)reg;
1020 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1021 break;
1022 case 0x002:
1023 rom.perfectMatch[2] = (uint8_t)reg;
1024 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1025 break;
1026 case 0x004:
1027 rom.perfectMatch[4] = (uint8_t)reg;
1028 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1029 break;
1030 default:
1031
1032 if (rfaddr >= FHASH_ADDR &&
1033 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1034
1035 // Only word-aligned writes supported
1036 if (rfaddr % 2)
1037 panic("unaligned write to filter hash table!");
1038
1039 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1040 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1041 = (uint8_t)(reg >> 8);
1042 break;
1043 }
1044 panic("writing RFDR for something other than pattern matching\
1045 or hashing! %#x\n", rfaddr);
1046 }
1047
1048 case BRAR:
1049 regs.brar = reg;
1050 break;
1051
1052 case BRDR:
1053 panic("the driver never uses BRDR, something is wrong!\n");
1054
1055 case SRR:
1056 panic("SRR is read only register!\n");
1057
1058 case MIBC:
1059 panic("the driver never uses MIBC, something is wrong!\n");
1060
1061 case VRCR:
1062 regs.vrcr = reg;
1063 break;
1064
1065 case VTCR:
1066 regs.vtcr = reg;
1067 break;
1068
1069 case VDR:
1070 panic("the driver never uses VDR, something is wrong!\n");
1071
1072 case CCSR:
1073 /* not going to implement clockrun stuff */
1074 regs.ccsr = reg;
1075 break;
1076
1077 case TBICR:
1078 regs.tbicr = reg;
1079 if (reg & TBICR_MR_LOOPBACK)
1080 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1081
1082 if (reg & TBICR_MR_AN_ENABLE) {
1083 regs.tanlpar = regs.tanar;
1084 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1085 }
1086
1087 #if 0
1088 if (reg & TBICR_MR_RESTART_AN) ;
1089 #endif
1090
1091 break;
1092
1093 case TBISR:
1094 panic("TBISR is read only register!\n");
1095
1096 case TANAR:
1097 // Only write the writable bits
1098 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1099 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1100
1101 // Pause capability unimplemented
1102 #if 0
1103 if (reg & TANAR_PS2) ;
1104 if (reg & TANAR_PS1) ;
1105 #endif
1106
1107 break;
1108
1109 case TANLPAR:
1110 panic("this should only be written to by the fake phy!\n");
1111
1112 case TANER:
1113 panic("TANER is read only register!\n");
1114
1115 case TESR:
1116 regs.tesr = reg;
1117 break;
1118
1119 default:
1120 panic("invalid register access daddr=%#x", daddr);
1121 }
1122 } else {
1123 panic("Invalid Request Size");
1124 }
1125 pkt->makeAtomicResponse();
1126 return pioDelay;
1127 }
1128
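// Post device-level interrupt causes: set the bits in the ISR, count
// masked-in causes for the coalescing statistics, and schedule a CPU
// interrupt (delayed by intrDelay unless a no-delay cause is pending)
// if any unmasked cause is now set.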
1129 void
1130 NSGigE::devIntrPost(uint32_t interrupts)
1131 {
1132 if (interrupts & ISR_RESERVE)
1133 panic("Cannot set a reserved interrupt");
1134
1135 if (interrupts & ISR_NOIMPL)
1136 warn("interrupt not implemented %#x\n", interrupts);
1137
1138 interrupts &= ISR_IMPL;
1139 regs.isr |= interrupts;
1140
1141 if (interrupts & regs.imr) {
1142 if (interrupts & ISR_SWI) {
1143 totalSwi++;
1144 }
1145 if (interrupts & ISR_RXIDLE) {
1146 totalRxIdle++;
1147 }
1148 if (interrupts & ISR_RXOK) {
1149 totalRxOk++;
1150 }
1151 if (interrupts & ISR_RXDESC) {
1152 totalRxDesc++;
1153 }
1154 if (interrupts & ISR_TXOK) {
1155 totalTxOk++;
1156 }
1157 if (interrupts & ISR_TXIDLE) {
1158 totalTxIdle++;
1159 }
1160 if (interrupts & ISR_TXDESC) {
1161 totalTxDesc++;
1162 }
1163 if (interrupts & ISR_RXORN) {
1164 totalRxOrn++;
1165 }
1166 }
1167
1168 DPRINTF(EthernetIntr,
1169 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1170 interrupts, regs.isr, regs.imr);
1171
1172 if ((regs.isr & regs.imr)) {
1173 Tick when = curTick;
1174 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
1175 when += intrDelay;
1176 cpuIntrPost(when);
1177 }
1178 }
1179
1180 /* Because the interrupt-counting stats are updated inside this function,
1181 it is now limited to being used to clear all interrupts when the kernel
1182 reads the ISR and services them. Keep that in mind if you were thinking
1183 of expanding its use.
1184 */
1185 void
1186 NSGigE::devIntrClear(uint32_t interrupts)
1187 {
1188 if (interrupts & ISR_RESERVE)
1189 panic("Cannot clear a reserved interrupt");
1190
1191 if (regs.isr & regs.imr & ISR_SWI) {
1192 postedSwi++;
1193 }
1194 if (regs.isr & regs.imr & ISR_RXIDLE) {
1195 postedRxIdle++;
1196 }
1197 if (regs.isr & regs.imr & ISR_RXOK) {
1198 postedRxOk++;
1199 }
1200 if (regs.isr & regs.imr & ISR_RXDESC) {
1201 postedRxDesc++;
1202 }
1203 if (regs.isr & regs.imr & ISR_TXOK) {
1204 postedTxOk++;
1205 }
1206 if (regs.isr & regs.imr & ISR_TXIDLE) {
1207 postedTxIdle++;
1208 }
1209 if (regs.isr & regs.imr & ISR_TXDESC) {
1210 postedTxDesc++;
1211 }
1212 if (regs.isr & regs.imr & ISR_RXORN) {
1213 postedRxOrn++;
1214 }
1215
1216 if (regs.isr & regs.imr & ISR_IMPL)
1217 postedInterrupts++;
1218
1219 interrupts &= ~ISR_NOIMPL;
1220 regs.isr &= ~interrupts;
1221
1222 DPRINTF(EthernetIntr,
1223 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1224 interrupts, regs.isr, regs.imr);
1225
1226 if (!(regs.isr & regs.imr))
1227 cpuIntrClear();
1228 }
1229
1230 void
1231 NSGigE::devIntrChangeMask()
1232 {
1233 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1234 regs.isr, regs.imr, regs.isr & regs.imr);
1235
1236 if (regs.isr & regs.imr)
1237 cpuIntrPost(curTick);
1238 else
1239 cpuIntrClear();
1240 }
1241
1242 void
1243 NSGigE::cpuIntrPost(Tick when)
1244 {
1245 // If the interrupt you want to post is later than an interrupt
1246 // already scheduled, just let it post in the coming one and don't
1247 // schedule another.
1248 // HOWEVER, must be sure that the scheduled intrTick is in the
1249 // future (this was formerly the source of a bug)
1250 /**
1251 * @todo this warning should be removed and the intrTick code should
1252 * be fixed.
1253 */
1254 assert(when >= curTick);
1255 assert(intrTick >= curTick || intrTick == 0);
1256 if (when > intrTick && intrTick != 0) {
1257 DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
1258 intrTick);
1259 return;
1260 }
1261
1262 intrTick = when;
1263 if (intrTick < curTick) {
1264 debug_break();
1265 intrTick = curTick;
1266 }
1267
1268 DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
1269 intrTick);
1270
1271 if (intrEvent)
1272 intrEvent->squash();
1273 intrEvent = new IntrEvent(this, intrTick, true);
1274 }
1275
1276 void
1277 NSGigE::cpuInterrupt()
1278 {
1279 assert(intrTick == curTick);
1280
1281 // Whether or not there's a pending interrupt, we don't care about
1282 // it anymore
1283 intrEvent = 0;
1284 intrTick = 0;
1285
1286 // Don't send an interrupt if there's already one
1287 if (cpuPendingIntr) {
1288 DPRINTF(EthernetIntr,
1289 "would send an interrupt now, but there's already pending\n");
1290 } else {
1291 // Send interrupt
1292 cpuPendingIntr = true;
1293
1294 DPRINTF(EthernetIntr, "posting interrupt\n");
1295 intrPost();
1296 }
1297 }
1298
1299 void
1300 NSGigE::cpuIntrClear()
1301 {
1302 if (!cpuPendingIntr)
1303 return;
1304
1305 if (intrEvent) {
1306 intrEvent->squash();
1307 intrEvent = 0;
1308 }
1309
1310 intrTick = 0;
1311
1312 cpuPendingIntr = false;
1313
1314 DPRINTF(EthernetIntr, "clearing interrupt\n");
1315 intrClear();
1316 }
1317
1318 bool
1319 NSGigE::cpuIntrPending() const
1320 { return cpuPendingIntr; }
1321
1322 void
1323 NSGigE::txReset()
1324 {
1325
1326 DPRINTF(Ethernet, "transmit reset\n");
1327
1328 CTDD = false;
1329 txEnable = false;
1330 txFragPtr = 0;
1331 assert(txDescCnt == 0);
1332 txFifo.clear();
1333 txState = txIdle;
1334 assert(txDmaState == dmaIdle);
1335 }
1336
1337 void
1338 NSGigE::rxReset()
1339 {
1340 DPRINTF(Ethernet, "receive reset\n");
1341
1342 CRDD = false;
1343 assert(rxPktBytes == 0);
1344 rxEnable = false;
1345 rxFragPtr = 0;
1346 assert(rxDescCnt == 0);
1347 assert(rxDmaState == dmaIdle);
1348 rxFifo.clear();
1349 rxState = rxIdle;
1350 }
1351
1352 void
1353 NSGigE::regsReset()
1354 {
1355 memset(&regs, 0, sizeof(regs));
1356 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1357 regs.mear = 0x12;
1358 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1359 // fill threshold to 32 bytes
1360 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1361 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1362 regs.mibc = MIBC_FRZ;
1363 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1364 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1365 regs.brar = 0xffffffff;
1366
1367 extstsEnable = false;
1368 acceptBroadcast = false;
1369 acceptMulticast = false;
1370 acceptUnicast = false;
1371 acceptPerfect = false;
1372 acceptArp = false;
1373 }
1374
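// Start (or defer) a receive-side DMA read. If another DMA is already
// pending or the system is not running, the request is parked in the
// dmaReadWaiting state; either way the caller exits its state machine
// and is kicked again later.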
1375 bool
1376 NSGigE::doRxDmaRead()
1377 {
1378 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1379 rxDmaState = dmaReading;
1380
1381 if (dmaPending() || getState() != Running)
1382 rxDmaState = dmaReadWaiting;
1383 else
1384 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1385
1386 return true;
1387 }
1388
1389 void
1390 NSGigE::rxDmaReadDone()
1391 {
1392 assert(rxDmaState == dmaReading);
1393 rxDmaState = dmaIdle;
1394
1395 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1396 rxDmaAddr, rxDmaLen);
1397 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1398
1399 // If the transmit state machine has a pending DMA, let it go first
1400 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1401 txKick();
1402
1403 rxKick();
1404 }
1405
1406 bool
1407 NSGigE::doRxDmaWrite()
1408 {
1409 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1410 rxDmaState = dmaWriting;
1411
1412 if (dmaPending() || getState() != Running)
1413 rxDmaState = dmaWriteWaiting;
1414 else
1415 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1416 return true;
1417 }
1418
1419 void
1420 NSGigE::rxDmaWriteDone()
1421 {
1422 assert(rxDmaState == dmaWriting);
1423 rxDmaState = dmaIdle;
1424
1425 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1426 rxDmaAddr, rxDmaLen);
1427 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1428
1429 // If the transmit state machine has a pending DMA, let it go first
1430 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1431 txKick();
1432
1433 rxKick();
1434 }
1435
1436 void
1437 NSGigE::rxKick()
1438 {
1439 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1440
1441 DPRINTF(EthernetSM,
1442 "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
1443 NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);
1444
1445 Addr link, bufptr;
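// cmdsts and extsts alias into whichever descriptor layout (32- or
// 64-bit) is currently in use, so the state machine below can update
// either format through the same references.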
1446 uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
1447 uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;
1448
1449 next:
1450 if (clock) {
1451 if (rxKickTick > curTick) {
1452 DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
1453 rxKickTick);
1454
1455 goto exit;
1456 }
1457
1458 // Go to the next state machine clock tick.
1459 rxKickTick = curTick + cycles(1);
1460 }
1461
1462 switch(rxDmaState) {
1463 case dmaReadWaiting:
1464 if (doRxDmaRead())
1465 goto exit;
1466 break;
1467 case dmaWriteWaiting:
1468 if (doRxDmaWrite())
1469 goto exit;
1470 break;
1471 default:
1472 break;
1473 }
1474
1475 link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
1476 bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;
1477
1478 // See the state machine in the spec for details.
1479 // The way this works: when work in one state finishes and the
1480 // machine can go directly to another state, it jumps to the
1481 // label "next". When there is intermediate work, such as a DMA,
1482 // that prevents moving to the next state yet, it jumps to "exit"
1483 // and leaves the loop; when the DMA is done it will trigger
1484 // an event and come back to this loop.
1485 switch (rxState) {
1486 case rxIdle:
1487 if (!rxEnable) {
1488 DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
1489 goto exit;
1490 }
1491
1492 if (CRDD) {
1493 rxState = rxDescRefr;
1494
1495 rxDmaAddr = regs.rxdp & 0x3fffffff;
1496 rxDmaData =
1497 is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
1498 rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
1499 rxDmaFree = dmaDescFree;
1500
1501 descDmaReads++;
1502 descDmaRdBytes += rxDmaLen;
1503
1504 if (doRxDmaRead())
1505 goto exit;
1506 } else {
1507 rxState = rxDescRead;
1508
1509 rxDmaAddr = regs.rxdp & 0x3fffffff;
1510 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1511 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1512 rxDmaFree = dmaDescFree;
1513
1514 descDmaReads++;
1515 descDmaRdBytes += rxDmaLen;
1516
1517 if (doRxDmaRead())
1518 goto exit;
1519 }
1520 break;
1521
1522 case rxDescRefr:
1523 if (rxDmaState != dmaIdle)
1524 goto exit;
1525
1526 rxState = rxAdvance;
1527 break;
1528
1529 case rxDescRead:
1530 if (rxDmaState != dmaIdle)
1531 goto exit;
1532
1533 DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
1534 regs.rxdp & 0x3fffffff);
1535 DPRINTF(EthernetDesc,
1536 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1537 link, bufptr, cmdsts, extsts);
1538
1539 if (cmdsts & CMDSTS_OWN) {
1540 devIntrPost(ISR_RXIDLE);
1541 rxState = rxIdle;
1542 goto exit;
1543 } else {
1544 rxState = rxFifoBlock;
1545 rxFragPtr = bufptr;
1546 rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
1547 }
1548 break;
1549
1550 case rxFifoBlock:
1551 if (!rxPacket) {
1552 /**
1553 * @todo in reality, we should be able to start processing
1554 * the packet as it arrives, and not have to wait for the
1555 * full packet to be in the receive fifo.
1556 */
1557 if (rxFifo.empty())
1558 goto exit;
1559
1560 DPRINTF(EthernetSM, "****processing receive of new packet****\n");
1561
1562 // If we don't have a packet, grab a new one from the fifo.
1563 rxPacket = rxFifo.front();
1564 rxPktBytes = rxPacket->length;
1565 rxPacketBufPtr = rxPacket->data;
1566
1567 #if TRACING_ON
1568 if (DTRACE(Ethernet)) {
1569 IpPtr ip(rxPacket);
1570 if (ip) {
1571 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1572 TcpPtr tcp(ip);
1573 if (tcp) {
1574 DPRINTF(Ethernet,
1575 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1576 tcp->sport(), tcp->dport(), tcp->seq(),
1577 tcp->ack());
1578 }
1579 }
1580 }
1581 #endif
1582
1583 // sanity check - the driver appears to behave like this
1584 assert(rxDescCnt >= rxPktBytes);
1585 rxFifo.pop();
1586 }
1587
1588
1589 // don't need the && rxDescCnt > 0 if driver sanity check
1590 // above holds
1591 if (rxPktBytes > 0) {
1592 rxState = rxFragWrite;
1593 // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
1594 // check holds
1595 rxXferLen = rxPktBytes;
1596
1597 rxDmaAddr = rxFragPtr & 0x3fffffff;
1598 rxDmaData = rxPacketBufPtr;
1599 rxDmaLen = rxXferLen;
1600 rxDmaFree = dmaDataFree;
1601
1602 if (doRxDmaWrite())
1603 goto exit;
1604
1605 } else {
1606 rxState = rxDescWrite;
1607
1608 //if (rxPktBytes == 0) { /* packet is done */
1609 assert(rxPktBytes == 0);
1610 DPRINTF(EthernetSM, "done with receiving packet\n");
1611
1612 cmdsts |= CMDSTS_OWN;
1613 cmdsts &= ~CMDSTS_MORE;
1614 cmdsts |= CMDSTS_OK;
1615 cmdsts &= 0xffff0000;
1616 cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE
1617
1618 #if 0
1619 /*
1620 * the driver only uses these for its own stats keeping,
1621 * which we don't care about; they aren't necessary for
1622 * functionality and doing this would just slow us down.
1623 * if a later driver version ends up using them for
1624 * functional purposes, just re-enable this block
1625 */
1626 if (rxFilterEnable) {
1627 cmdsts &= ~CMDSTS_DEST_MASK;
1628 const EthAddr &dst = rxFifoFront()->dst();
1629 if (dst->unicast())
1630 cmdsts |= CMDSTS_DEST_SELF;
1631 if (dst->multicast())
1632 cmdsts |= CMDSTS_DEST_MULTI;
1633 if (dst->broadcast())
1634 cmdsts |= CMDSTS_DEST_MASK;
1635 }
1636 #endif
1637
1638 IpPtr ip(rxPacket);
1639 if (extstsEnable && ip) {
1640 extsts |= EXTSTS_IPPKT;
1641 rxIpChecksums++;
1642 if (cksum(ip) != 0) {
1643 DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
1644 extsts |= EXTSTS_IPERR;
1645 }
1646 TcpPtr tcp(ip);
1647 UdpPtr udp(ip);
1648 if (tcp) {
1649 extsts |= EXTSTS_TCPPKT;
1650 rxTcpChecksums++;
1651 if (cksum(tcp) != 0) {
1652 DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
1653 extsts |= EXTSTS_TCPERR;
1654
1655 }
1656 } else if (udp) {
1657 extsts |= EXTSTS_UDPPKT;
1658 rxUdpChecksums++;
1659 if (cksum(udp) != 0) {
1660 DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
1661 extsts |= EXTSTS_UDPERR;
1662 }
1663 }
1664 }
1665 rxPacket = 0;
1666
1667 /*
1668 * the driver seems to always receive into desc buffers
1669 * of size 1514, so a packet is never split across
1670 * multiple descriptors on the receive side, so
1671 * that case is not implemented, hence the assert above.
1672 */
1673
1674 DPRINTF(EthernetDesc,
1675 "rxDesc: addr=%08x writeback cmdsts extsts\n",
1676 regs.rxdp & 0x3fffffff);
1677 DPRINTF(EthernetDesc,
1678 "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
1679 link, bufptr, cmdsts, extsts);
1680
1681 rxDmaAddr = regs.rxdp & 0x3fffffff;
1682 rxDmaData = &cmdsts;
1683 if (is64bit) {
1684 rxDmaAddr += offsetof(ns_desc64, cmdsts);
1685 rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
1686 } else {
1687 rxDmaAddr += offsetof(ns_desc32, cmdsts);
1688 rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
1689 }
1690 rxDmaFree = dmaDescFree;
1691
1692 descDmaWrites++;
1693 descDmaWrBytes += rxDmaLen;
1694
1695 if (doRxDmaWrite())
1696 goto exit;
1697 }
1698 break;
1699
1700 case rxFragWrite:
1701 if (rxDmaState != dmaIdle)
1702 goto exit;
1703
1704 rxPacketBufPtr += rxXferLen;
1705 rxFragPtr += rxXferLen;
1706 rxPktBytes -= rxXferLen;
1707
1708 rxState = rxFifoBlock;
1709 break;
1710
1711 case rxDescWrite:
1712 if (rxDmaState != dmaIdle)
1713 goto exit;
1714
1715 assert(cmdsts & CMDSTS_OWN);
1716
1717 assert(rxPacket == 0);
1718 devIntrPost(ISR_RXOK);
1719
1720 if (cmdsts & CMDSTS_INTR)
1721 devIntrPost(ISR_RXDESC);
1722
1723 if (!rxEnable) {
1724 DPRINTF(EthernetSM, "Halting the RX state machine\n");
1725 rxState = rxIdle;
1726 goto exit;
1727 } else
1728 rxState = rxAdvance;
1729 break;
1730
1731 case rxAdvance:
1732 if (link == 0) {
1733 devIntrPost(ISR_RXIDLE);
1734 rxState = rxIdle;
1735 CRDD = true;
1736 goto exit;
1737 } else {
1738 if (rxDmaState != dmaIdle)
1739 goto exit;
1740 rxState = rxDescRead;
1741 regs.rxdp = link;
1742 CRDD = false;
1743
1744 rxDmaAddr = regs.rxdp & 0x3fffffff;
1745 rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
1746 rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
1747 rxDmaFree = dmaDescFree;
1748
1749 if (doRxDmaRead())
1750 goto exit;
1751 }
1752 break;
1753
1754 default:
1755 panic("Invalid rxState!");
1756 }
1757
1758 DPRINTF(EthernetSM, "entering next rxState=%s\n",
1759 NsRxStateStrings[rxState]);
1760 goto next;
1761
1762 exit:
1763 /**
1764 * @todo do we want to schedule a future kick?
1765 */
1766 DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
1767 NsRxStateStrings[rxState]);
1768
1769 if (clock && !rxKickEvent.scheduled())
1770 rxKickEvent.schedule(rxKickTick);
1771 }
1772
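// Try to hand the packet at the head of txFifo to the link; on a
// successful send, pop it, update stats, and post ISR_TXOK. If anything
// is still queued, a retry is scheduled.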
1773 void
1774 NSGigE::transmit()
1775 {
1776 if (txFifo.empty()) {
1777 DPRINTF(Ethernet, "nothing to transmit\n");
1778 return;
1779 }
1780
1781 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1782 txFifo.size());
1783 if (interface->sendPacket(txFifo.front())) {
1784 #if TRACING_ON
1785 if (DTRACE(Ethernet)) {
1786 IpPtr ip(txFifo.front());
1787 if (ip) {
1788 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1789 TcpPtr tcp(ip);
1790 if (tcp) {
1791 DPRINTF(Ethernet,
1792 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1793 tcp->sport(), tcp->dport(), tcp->seq(),
1794 tcp->ack());
1795 }
1796 }
1797 }
1798 #endif
1799
1800 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1801 txBytes += txFifo.front()->length;
1802 txPackets++;
1803
1804 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1805 txFifo.avail());
1806 txFifo.pop();
1807
1808 /*
1809 * normally the descriptor writeback would happen here, and ONLY
1810 * after that is done would this interrupt be sent. but since our
1811 * transmits never actually fail, just post the interrupt here;
1812 * otherwise the code has to stray from this nice format.
1813 * besides, it's functionally the same.
1814 */
1815 devIntrPost(ISR_TXOK);
1816 }
1817
1818 if (!txFifo.empty() && !txEvent.scheduled()) {
1819 DPRINTF(Ethernet, "reschedule transmit\n");
1820 txEvent.schedule(curTick + retryTime);
1821 }
1822 }
1823
1824 bool
1825 NSGigE::doTxDmaRead()
1826 {
1827 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1828 txDmaState = dmaReading;
1829
1830 if (dmaPending() || getState() != Running)
1831 txDmaState = dmaReadWaiting;
1832 else
1833 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1834
1835 return true;
1836 }
1837
1838 void
1839 NSGigE::txDmaReadDone()
1840 {
1841 assert(txDmaState == dmaReading);
1842 txDmaState = dmaIdle;
1843
1844 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1845 txDmaAddr, txDmaLen);
1846 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1847
1848 // If the receive state machine has a pending DMA, let it go first
1849 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1850 rxKick();
1851
1852 txKick();
1853 }
1854
1855 bool
1856 NSGigE::doTxDmaWrite()
1857 {
1858 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1859 txDmaState = dmaWriting;
1860
1861 if (dmaPending() || getState() != Running)
1862 txDmaState = dmaWriteWaiting;
1863 else
1864 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1865 return true;
1866 }
1867
1868 void
1869 NSGigE::txDmaWriteDone()
1870 {
1871 assert(txDmaState == dmaWriting);
1872 txDmaState = dmaIdle;
1873
1874 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1875 txDmaAddr, txDmaLen);
1876 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1877
1878 // If the receive state machine has a pending DMA, let it go first
1879 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1880 rxKick();
1881
1882 txKick();
1883 }
1884
1885 void
1886 NSGigE::txKick()
1887 {
1888 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1889
1890 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1891 NsTxStateStrings[txState], is64bit ? 64 : 32);
1892
1893 Addr link, bufptr;
1894 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1895 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1896
1897 next:
1898 if (clock) {
1899 if (txKickTick > curTick) {
1900 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1901 txKickTick);
1902 goto exit;
1903 }
1904
1905 // Go to the next state machine clock tick.
1906 txKickTick = curTick + cycles(1);
1907 }
1908
1909 switch(txDmaState) {
1910 case dmaReadWaiting:
1911 if (doTxDmaRead())
1912 goto exit;
1913 break;
1914 case dmaWriteWaiting:
1915 if (doTxDmaWrite())
1916 goto exit;
1917 break;
1918 default:
1919 break;
1920 }
1921
1922 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1923 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1924 switch (txState) {
1925 case txIdle:
1926 if (!txEnable) {
1927 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1928 goto exit;
1929 }
1930
1931 if (CTDD) {
1932 txState = txDescRefr;
1933
1934 txDmaAddr = regs.txdp & 0x3fffffff;
1935 txDmaData =
1936 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1937 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1938 txDmaFree = dmaDescFree;
1939
1940 descDmaReads++;
1941 descDmaRdBytes += txDmaLen;
1942
1943 if (doTxDmaRead())
1944 goto exit;
1945
1946 } else {
1947 txState = txDescRead;
1948
1949 txDmaAddr = regs.txdp & 0x3fffffff;
1950 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1951 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1952 txDmaFree = dmaDescFree;
1953
1954 descDmaReads++;
1955 descDmaRdBytes += txDmaLen;
1956
1957 if (doTxDmaRead())
1958 goto exit;
1959 }
1960 break;
1961
1962 case txDescRefr:
1963 if (txDmaState != dmaIdle)
1964 goto exit;
1965
1966 txState = txAdvance;
1967 break;
1968
1969 case txDescRead:
1970 if (txDmaState != dmaIdle)
1971 goto exit;
1972
1973 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1974 regs.txdp & 0x3fffffff);
1975 DPRINTF(EthernetDesc,
1976 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1977 link, bufptr, cmdsts, extsts);
1978
1979 if (cmdsts & CMDSTS_OWN) {
1980 txState = txFifoBlock;
1981 txFragPtr = bufptr;
1982 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1983 } else {
1984 devIntrPost(ISR_TXIDLE);
1985 txState = txIdle;
1986 goto exit;
1987 }
1988 break;
1989
1990 case txFifoBlock:
1991 if (!txPacket) {
1992 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1993 txPacket = new EthPacketData(16384);
1994 txPacketBufPtr = txPacket->data;
1995 }
1996
1997 if (txDescCnt == 0) {
1998 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1999 if (cmdsts & CMDSTS_MORE) {
2000 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2001 txState = txDescWrite;
2002
2003 cmdsts &= ~CMDSTS_OWN;
2004
2005 txDmaAddr = regs.txdp & 0x3fffffff;
2006 txDmaData = &cmdsts;
2007 if (is64bit) {
2008 txDmaAddr += offsetof(ns_desc64, cmdsts);
2009 txDmaLen = sizeof(txDesc64.cmdsts);
2010 } else {
2011 txDmaAddr += offsetof(ns_desc32, cmdsts);
2012 txDmaLen = sizeof(txDesc32.cmdsts);
2013 }
2014 txDmaFree = dmaDescFree;
2015
2016 if (doTxDmaWrite())
2017 goto exit;
2018
2019 } else { /* this packet is totally done */
2020 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2021 /* deal with the packet that just finished */
2022 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2023 IpPtr ip(txPacket);
2024 if (extsts & EXTSTS_UDPPKT) {
2025 UdpPtr udp(ip);
2026 udp->sum(0);
2027 udp->sum(cksum(udp));
2028 txUdpChecksums++;
2029 } else if (extsts & EXTSTS_TCPPKT) {
2030 TcpPtr tcp(ip);
2031 tcp->sum(0);
2032 tcp->sum(cksum(tcp));
2033 txTcpChecksums++;
2034 }
2035 if (extsts & EXTSTS_IPPKT) {
2036 ip->sum(0);
2037 ip->sum(cksum(ip));
2038 txIpChecksums++;
2039 }
2040 }
2041
2042 txPacket->length = txPacketBufPtr - txPacket->data;
2043 // this is just because the receive side can't handle a
2044 // bigger packet, so make sure we never send one
2045 if (txPacket->length > 1514)
2046 panic("transmit packet too large, %d > 1514\n",
2047 txPacket->length);
2048
2049 #ifndef NDEBUG
2050 bool success =
2051 #endif
2052 txFifo.push(txPacket);
2053 assert(success);
2054
2055 /*
2056 * this following section is not to spec, but
2057 * functionally shouldn't be any different. normally,
2058 * the chip will wait until the transmit has occurred
2059 * before writing back the descriptor because it has
2060 * to wait to see that it was successfully transmitted
2061 * to decide whether to set CMDSTS_OK or not.
2062 * however, in the simulator since it is always
2063 * successfully transmitted, and writing it exactly to
2064 * spec would complicate the code, we just do it here
2065 */
2066
2067 cmdsts &= ~CMDSTS_OWN;
2068 cmdsts |= CMDSTS_OK;
2069
2070 DPRINTF(EthernetDesc,
2071 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2072 cmdsts, extsts);
2073
2074 txDmaFree = dmaDescFree;
2075 txDmaAddr = regs.txdp & 0x3fffffff;
2076 txDmaData = &cmdsts;
2077 if (is64bit) {
2078 txDmaAddr += offsetof(ns_desc64, cmdsts);
2079 txDmaLen =
2080 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2081 } else {
2082 txDmaAddr += offsetof(ns_desc32, cmdsts);
2083 txDmaLen =
2084 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2085 }
2086
2087 descDmaWrites++;
2088 descDmaWrBytes += txDmaLen;
2089
2090 transmit();
2091 txPacket = 0;
2092
2093 if (!txEnable) {
2094 DPRINTF(EthernetSM, "halting TX state machine\n");
2095 txState = txIdle;
2096 goto exit;
2097 } else
2098 txState = txAdvance;
2099
2100 if (doTxDmaWrite())
2101 goto exit;
2102 }
2103 } else {
2104 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2105 if (!txFifo.full()) {
2106 txState = txFragRead;
2107
2108 /*
2109 * The number of bytes transferred is either whatever
2110 * is left in the descriptor (txDescCnt), or if there
2111 * is not enough room in the fifo, just whatever room
2112 * is left in the fifo
2113 */
2114 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2115
2116 txDmaAddr = txFragPtr & 0x3fffffff;
2117 txDmaData = txPacketBufPtr;
2118 txDmaLen = txXferLen;
2119 txDmaFree = dmaDataFree;
2120
2121 if (doTxDmaRead())
2122 goto exit;
2123 } else {
2124 txState = txFifoBlock;
2125 transmit();
2126
2127 goto exit;
2128 }
2129
2130 }
2131 break;
2132
2133 case txFragRead:
2134 if (txDmaState != dmaIdle)
2135 goto exit;
2136
2137 txPacketBufPtr += txXferLen;
2138 txFragPtr += txXferLen;
2139 txDescCnt -= txXferLen;
2140 txFifo.reserve(txXferLen);
2141
2142 txState = txFifoBlock;
2143 break;
2144
2145 case txDescWrite:
2146 if (txDmaState != dmaIdle)
2147 goto exit;
2148
2149 if (cmdsts & CMDSTS_INTR)
2150 devIntrPost(ISR_TXDESC);
2151
2152 if (!txEnable) {
2153 DPRINTF(EthernetSM, "halting TX state machine\n");
2154 txState = txIdle;
2155 goto exit;
2156 } else
2157 txState = txAdvance;
2158 break;
2159
2160 case txAdvance:
2161 if (link == 0) {
2162 devIntrPost(ISR_TXIDLE);
2163 txState = txIdle;
2164 goto exit;
2165 } else {
2166 if (txDmaState != dmaIdle)
2167 goto exit;
2168 txState = txDescRead;
2169 regs.txdp = link;
2170 CTDD = false;
2171
2172 txDmaAddr = link & 0x3fffffff;
2173 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2174 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2175 txDmaFree = dmaDescFree;
2176
2177 if (doTxDmaRead())
2178 goto exit;
2179 }
2180 break;
2181
2182 default:
2183 panic("invalid state");
2184 }
2185
2186 DPRINTF(EthernetSM, "entering next txState=%s\n",
2187 NsTxStateStrings[txState]);
2188 goto next;
2189
2190 exit:
2191 /**
2192 * @todo do we want to schedule a future kick?
2193 */
2194 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2195 NsTxStateStrings[txState]);
2196
2197 if (clock && !txKickEvent.scheduled())
2198 txKickEvent.schedule(txKickTick);
2199 }
2200
2201 /**
2202 * Advance the EEPROM state machine
2203 * Called on rising edge of EEPROM clock bit in MEAR
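 * The protocol is bit-banged through MEAR: a start bit, a 2-bit opcode
 * (only EEPROM_READ is implemented), a 6-bit address, and then 16 data
 * bits shifted out MSB-first on EEDO.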
2204 */
2205 void
2206 NSGigE::eepromKick()
2207 {
2208 switch (eepromState) {
2209
2210 case eepromStart:
2211
2212 // Wait for start bit
2213 if (regs.mear & MEAR_EEDI) {
2214 // Set up to get 2 opcode bits
2215 eepromState = eepromGetOpcode;
2216 eepromBitsToRx = 2;
2217 eepromOpcode = 0;
2218 }
2219 break;
2220
2221 case eepromGetOpcode:
2222 eepromOpcode <<= 1;
2223 eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
2224 --eepromBitsToRx;
2225
2226 // Done getting opcode
2227 if (eepromBitsToRx == 0) {
2228 if (eepromOpcode != EEPROM_READ)
2229 panic("only EEPROM reads are implemented!");
2230
2231 // Set up to get address
2232 eepromState = eepromGetAddress;
2233 eepromBitsToRx = 6;
2234 eepromAddress = 0;
2235 }
2236 break;
2237
2238 case eepromGetAddress:
2239 eepromAddress <<= 1;
2240 eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
2241 --eepromBitsToRx;
2242
2243 // Done getting address
2244 if (eepromBitsToRx == 0) {
2245
2246 if (eepromAddress >= EEPROM_SIZE)
2247 panic("EEPROM read access out of range!");
2248
2249 switch (eepromAddress) {
2250
2251 case EEPROM_PMATCH2_ADDR:
2252 eepromData = rom.perfectMatch[5];
2253 eepromData <<= 8;
2254 eepromData += rom.perfectMatch[4];
2255 break;
2256
2257 case EEPROM_PMATCH1_ADDR:
2258 eepromData = rom.perfectMatch[3];
2259 eepromData <<= 8;
2260 eepromData += rom.perfectMatch[2];
2261 break;
2262
2263 case EEPROM_PMATCH0_ADDR:
2264 eepromData = rom.perfectMatch[1];
2265 eepromData <<= 8;
2266 eepromData += rom.perfectMatch[0];
2267 break;
2268
2269 default:
2270 panic("FreeBSD driver only uses EEPROM to read PMATCH!");
2271 }
2272 // Set up to read data
2273 eepromState = eepromRead;
2274 eepromBitsToRx = 16;
2275
2276 // Clear data in bit
2277 regs.mear &= ~MEAR_EEDI;
2278 }
2279 break;
2280
2281 case eepromRead:
2282 // Clear Data Out bit
2283 regs.mear &= ~MEAR_EEDO;
2284 // Set bit to value of current EEPROM bit
2285 regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;
2286
2287 eepromData <<= 1;
2288 --eepromBitsToRx;
2289
2290 // All done
2291 if (eepromBitsToRx == 0) {
2292 eepromState = eepromStart;
2293 }
2294 break;
2295
2296 default:
2297 panic("invalid EEPROM state");
2298 }
2299
2300 }
2301
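/**
 * If anything is left in txFifo once the current transfer completes,
 * schedule another transmit attempt one device cycle from now;
 * otherwise there is nothing to do.
 */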
2302 void
2303 NSGigE::transferDone()
2304 {
2305 if (txFifo.empty()) {
2306 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2307 return;
2308 }
2309
2310 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2311
2312 txEvent.reschedule(curTick + cycles(1), true);
2313 }
2314
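/**
 * Apply the receive address filter to an incoming packet. Returns true
 * if the packet should be dropped: unicast frames are kept only when
 * acceptUnicast is set, the destination perfectly matches
 * rom.perfectMatch, or ARP acceptance applies; broadcast and multicast
 * frames are kept when the corresponding accept bits are set (multicast
 * hashing is faked, so hash filtering accepts everything).
 */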
2315 bool
2316 NSGigE::rxFilter(const EthPacketPtr &packet)
2317 {
2318 EthPtr eth = packet;
2319 bool drop = true;
2320 string type;
2321
2322 const EthAddr &dst = eth->dst();
2323 if (dst.unicast()) {
2324 // If we're accepting all unicast addresses
2325 if (acceptUnicast)
2326 drop = false;
2327
2328 // If we make a perfect match
2329 if (acceptPerfect && dst == rom.perfectMatch)
2330 drop = false;
2331
2332 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2333 drop = false;
2334
2335 } else if (dst.broadcast()) {
2336 // if we're accepting broadcasts
2337 if (acceptBroadcast)
2338 drop = false;
2339
2340 } else if (dst.multicast()) {
2341 // if we're accepting all multicasts
2342 if (acceptMulticast)
2343 drop = false;
2344
2345 // Multicast hashing faked - all packets accepted
2346 if (multicastHashEnable)
2347 drop = false;
2348 }
2349
2350 if (drop) {
2351 DPRINTF(Ethernet, "rxFilter drop\n");
2352 DDUMP(EthernetData, packet->data, packet->length);
2353 }
2354
2355 return drop;
2356 }
2357
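/**
 * Entry point for packets arriving from the wire. Returning true means
 * the packet was accepted (or intentionally dropped by the device);
 * returning false means rxFifo had no room, in which case the packet is
 * counted as dropped and an RX overrun interrupt (ISR_RXORN) is posted.
 */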
2358 bool
2359 NSGigE::recvPacket(EthPacketPtr packet)
2360 {
2361 rxBytes += packet->length;
2362 rxPackets++;
2363
2364 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2365 rxFifo.avail());
2366
2367 if (!rxEnable) {
2368 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2369 return true;
2370 }
2371
2372 if (!rxFilterEnable) {
2373 DPRINTF(Ethernet,
2374 "receive packet filtering disabled...packet dropped\n");
2375 return true;
2376 }
2377
2378 if (rxFilter(packet)) {
2379 DPRINTF(Ethernet, "packet filtered...dropped\n");
2380 return true;
2381 }
2382
2383 if (rxFifo.avail() < packet->length) {
2384 #if TRACING_ON
2385 IpPtr ip(packet);
2386 TcpPtr tcp(ip);
2387 if (ip) {
2388 DPRINTF(Ethernet,
2389 "packet won't fit in receive buffer...pkt ID %d dropped\n",
2390 ip->id());
2391 if (tcp) {
2392 DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
2393 }
2394 }
2395 #endif
2396 droppedPackets++;
2397 devIntrPost(ISR_RXORN);
2398 return false;
2399 }
2400
2401 rxFifo.push(packet);
2402
2403 rxKick();
2404 return true;
2405 }
2406
2407
2408 void
2409 NSGigE::resume()
2410 {
2411 SimObject::resume();
2412
2413 // During a drain we may have left the state machines in a waiting state,
2414 // and they wouldn't get going again until some other event occurred to
2415 // kick them. Kick them here so they resume immediately.
2416 txKick();
2417 rxKick();
2418 }
2419
2420
2421 //=====================================================================
2422 //
2423 // Serialization / checkpointing
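// Checkpointing notes: enum-valued machine state is widened to a plain
// int before being written and cast back in unserialize(), and the raw
// packet buffer pointers are stored as byte offsets into their packets
// so they can be rebuilt after the packet data has been restored.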
2424 void
2425 NSGigE::serialize(ostream &os)
2426 {
2427 // Serialize the PciDev base class
2428 PciDev::serialize(os);
2429
2430 /*
2431 * Finalize any DMA events now.
2432 */
2433 // @todo will mem system save pending dma?
2434
2435 /*
2436 * Serialize the device registers
2437 */
2438 SERIALIZE_SCALAR(regs.command);
2439 SERIALIZE_SCALAR(regs.config);
2440 SERIALIZE_SCALAR(regs.mear);
2441 SERIALIZE_SCALAR(regs.ptscr);
2442 SERIALIZE_SCALAR(regs.isr);
2443 SERIALIZE_SCALAR(regs.imr);
2444 SERIALIZE_SCALAR(regs.ier);
2445 SERIALIZE_SCALAR(regs.ihr);
2446 SERIALIZE_SCALAR(regs.txdp);
2447 SERIALIZE_SCALAR(regs.txdp_hi);
2448 SERIALIZE_SCALAR(regs.txcfg);
2449 SERIALIZE_SCALAR(regs.gpior);
2450 SERIALIZE_SCALAR(regs.rxdp);
2451 SERIALIZE_SCALAR(regs.rxdp_hi);
2452 SERIALIZE_SCALAR(regs.rxcfg);
2453 SERIALIZE_SCALAR(regs.pqcr);
2454 SERIALIZE_SCALAR(regs.wcsr);
2455 SERIALIZE_SCALAR(regs.pcr);
2456 SERIALIZE_SCALAR(regs.rfcr);
2457 SERIALIZE_SCALAR(regs.rfdr);
2458 SERIALIZE_SCALAR(regs.brar);
2459 SERIALIZE_SCALAR(regs.brdr);
2460 SERIALIZE_SCALAR(regs.srr);
2461 SERIALIZE_SCALAR(regs.mibc);
2462 SERIALIZE_SCALAR(regs.vrcr);
2463 SERIALIZE_SCALAR(regs.vtcr);
2464 SERIALIZE_SCALAR(regs.vdr);
2465 SERIALIZE_SCALAR(regs.ccsr);
2466 SERIALIZE_SCALAR(regs.tbicr);
2467 SERIALIZE_SCALAR(regs.tbisr);
2468 SERIALIZE_SCALAR(regs.tanar);
2469 SERIALIZE_SCALAR(regs.tanlpar);
2470 SERIALIZE_SCALAR(regs.taner);
2471 SERIALIZE_SCALAR(regs.tesr);
2472
2473 SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2474 SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2475
2476 SERIALIZE_SCALAR(ioEnable);
2477
2478 /*
2479 * Serialize the data Fifos
2480 */
2481 rxFifo.serialize("rxFifo", os);
2482 txFifo.serialize("txFifo", os);
2483
2484 /*
2485 * Serialize the various helper variables
2486 */
2487 bool txPacketExists = txPacket;
2488 SERIALIZE_SCALAR(txPacketExists);
2489 if (txPacketExists) {
2490 txPacket->length = txPacketBufPtr - txPacket->data;
2491 txPacket->serialize("txPacket", os);
2492 uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
2493 SERIALIZE_SCALAR(txPktBufPtr);
2494 }
2495
2496 bool rxPacketExists = rxPacket;
2497 SERIALIZE_SCALAR(rxPacketExists);
2498 if (rxPacketExists) {
2499 rxPacket->serialize("rxPacket", os);
2500 uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
2501 SERIALIZE_SCALAR(rxPktBufPtr);
2502 }
2503
2504 SERIALIZE_SCALAR(txXferLen);
2505 SERIALIZE_SCALAR(rxXferLen);
2506
2507 /*
2508 * Serialize Cached Descriptors
2509 */
2510 SERIALIZE_SCALAR(rxDesc64.link);
2511 SERIALIZE_SCALAR(rxDesc64.bufptr);
2512 SERIALIZE_SCALAR(rxDesc64.cmdsts);
2513 SERIALIZE_SCALAR(rxDesc64.extsts);
2514 SERIALIZE_SCALAR(txDesc64.link);
2515 SERIALIZE_SCALAR(txDesc64.bufptr);
2516 SERIALIZE_SCALAR(txDesc64.cmdsts);
2517 SERIALIZE_SCALAR(txDesc64.extsts);
2518 SERIALIZE_SCALAR(rxDesc32.link);
2519 SERIALIZE_SCALAR(rxDesc32.bufptr);
2520 SERIALIZE_SCALAR(rxDesc32.cmdsts);
2521 SERIALIZE_SCALAR(rxDesc32.extsts);
2522 SERIALIZE_SCALAR(txDesc32.link);
2523 SERIALIZE_SCALAR(txDesc32.bufptr);
2524 SERIALIZE_SCALAR(txDesc32.cmdsts);
2525 SERIALIZE_SCALAR(txDesc32.extsts);
2526 SERIALIZE_SCALAR(extstsEnable);
2527
2528 /*
2529 * Serialize tx state machine
2530 */
2531 int txState = this->txState;
2532 SERIALIZE_SCALAR(txState);
2533 SERIALIZE_SCALAR(txEnable);
2534 SERIALIZE_SCALAR(CTDD);
2535 SERIALIZE_SCALAR(txFragPtr);
2536 SERIALIZE_SCALAR(txDescCnt);
2537 int txDmaState = this->txDmaState;
2538 SERIALIZE_SCALAR(txDmaState);
2539 SERIALIZE_SCALAR(txKickTick);
2540
2541 /*
2542 * Serialize rx state machine
2543 */
2544 int rxState = this->rxState;
2545 SERIALIZE_SCALAR(rxState);
2546 SERIALIZE_SCALAR(rxEnable);
2547 SERIALIZE_SCALAR(CRDD);
2548 SERIALIZE_SCALAR(rxPktBytes);
2549 SERIALIZE_SCALAR(rxFragPtr);
2550 SERIALIZE_SCALAR(rxDescCnt);
2551 int rxDmaState = this->rxDmaState;
2552 SERIALIZE_SCALAR(rxDmaState);
2553 SERIALIZE_SCALAR(rxKickTick);
2554
2555 /*
2556 * Serialize EEPROM state machine
2557 */
2558 int eepromState = this->eepromState;
2559 SERIALIZE_SCALAR(eepromState);
2560 SERIALIZE_SCALAR(eepromClk);
2561 SERIALIZE_SCALAR(eepromBitsToRx);
2562 SERIALIZE_SCALAR(eepromOpcode);
2563 SERIALIZE_SCALAR(eepromAddress);
2564 SERIALIZE_SCALAR(eepromData);
2565
2566 /*
2567 * If there's a pending transmit, store the time so we can
2568 * reschedule it later
2569 */
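// (Stored relative to curTick so that unserialize() can simply
// reschedule the event at curTick + transmitTick after the restore,
// regardless of the absolute tick at which the checkpoint was taken.)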
2570 Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
2571 SERIALIZE_SCALAR(transmitTick);
2572
2573 /*
2574 * receive address filter settings
2575 */
2576 SERIALIZE_SCALAR(rxFilterEnable);
2577 SERIALIZE_SCALAR(acceptBroadcast);
2578 SERIALIZE_SCALAR(acceptMulticast);
2579 SERIALIZE_SCALAR(acceptUnicast);
2580 SERIALIZE_SCALAR(acceptPerfect);
2581 SERIALIZE_SCALAR(acceptArp);
2582 SERIALIZE_SCALAR(multicastHashEnable);
2583
2584 /*
2585 * Keep track of pending interrupt status.
2586 */
2587 SERIALIZE_SCALAR(intrTick);
2588 SERIALIZE_SCALAR(cpuPendingIntr);
2589 Tick intrEventTick = 0;
2590 if (intrEvent)
2591 intrEventTick = intrEvent->when();
2592 SERIALIZE_SCALAR(intrEventTick);
2593
2594 }
2595
2596 void
2597 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2598 {
2599 // Unserialize the PciDev base class
2600 PciDev::unserialize(cp, section);
2601
2602 UNSERIALIZE_SCALAR(regs.command);
2603 UNSERIALIZE_SCALAR(regs.config);
2604 UNSERIALIZE_SCALAR(regs.mear);
2605 UNSERIALIZE_SCALAR(regs.ptscr);
2606 UNSERIALIZE_SCALAR(regs.isr);
2607 UNSERIALIZE_SCALAR(regs.imr);
2608 UNSERIALIZE_SCALAR(regs.ier);
2609 UNSERIALIZE_SCALAR(regs.ihr);
2610 UNSERIALIZE_SCALAR(regs.txdp);
2611 UNSERIALIZE_SCALAR(regs.txdp_hi);
2612 UNSERIALIZE_SCALAR(regs.txcfg);
2613 UNSERIALIZE_SCALAR(regs.gpior);
2614 UNSERIALIZE_SCALAR(regs.rxdp);
2615 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2616 UNSERIALIZE_SCALAR(regs.rxcfg);
2617 UNSERIALIZE_SCALAR(regs.pqcr);
2618 UNSERIALIZE_SCALAR(regs.wcsr);
2619 UNSERIALIZE_SCALAR(regs.pcr);
2620 UNSERIALIZE_SCALAR(regs.rfcr);
2621 UNSERIALIZE_SCALAR(regs.rfdr);
2622 UNSERIALIZE_SCALAR(regs.brar);
2623 UNSERIALIZE_SCALAR(regs.brdr);
2624 UNSERIALIZE_SCALAR(regs.srr);
2625 UNSERIALIZE_SCALAR(regs.mibc);
2626 UNSERIALIZE_SCALAR(regs.vrcr);
2627 UNSERIALIZE_SCALAR(regs.vtcr);
2628 UNSERIALIZE_SCALAR(regs.vdr);
2629 UNSERIALIZE_SCALAR(regs.ccsr);
2630 UNSERIALIZE_SCALAR(regs.tbicr);
2631 UNSERIALIZE_SCALAR(regs.tbisr);
2632 UNSERIALIZE_SCALAR(regs.tanar);
2633 UNSERIALIZE_SCALAR(regs.tanlpar);
2634 UNSERIALIZE_SCALAR(regs.taner);
2635 UNSERIALIZE_SCALAR(regs.tesr);
2636
2637 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2638 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2639
2640 UNSERIALIZE_SCALAR(ioEnable);
2641
2642 /*
2643 * unserialize the data fifos
2644 */
2645 rxFifo.unserialize("rxFifo", cp, section);
2646 txFifo.unserialize("txFifo", cp, section);
2647
2648 /*
2649 * unserialize the various helper variables
2650 */
2651 bool txPacketExists;
2652 UNSERIALIZE_SCALAR(txPacketExists);
2653 if (txPacketExists) {
2654 txPacket = new EthPacketData(16384);
2655 txPacket->unserialize("txPacket", cp, section);
2656 uint32_t txPktBufPtr;
2657 UNSERIALIZE_SCALAR(txPktBufPtr);
2658 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2659 } else
2660 txPacket = 0;
2661
2662 bool rxPacketExists;
2663 UNSERIALIZE_SCALAR(rxPacketExists);
2665 if (rxPacketExists) {
2666 rxPacket = new EthPacketData(16384);
2667 rxPacket->unserialize("rxPacket", cp, section);
2668 uint32_t rxPktBufPtr;
2669 UNSERIALIZE_SCALAR(rxPktBufPtr);
2670 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2671 } else
2672 rxPacket = 0;
2673
2674 UNSERIALIZE_SCALAR(txXferLen);
2675 UNSERIALIZE_SCALAR(rxXferLen);
2676
2677 /*
2678 * Unserialize Cached Descriptors
2679 */
2680 UNSERIALIZE_SCALAR(rxDesc64.link);
2681 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2682 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2683 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2684 UNSERIALIZE_SCALAR(txDesc64.link);
2685 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2686 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2687 UNSERIALIZE_SCALAR(txDesc64.extsts);
2688 UNSERIALIZE_SCALAR(rxDesc32.link);
2689 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2690 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2691 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2692 UNSERIALIZE_SCALAR(txDesc32.link);
2693 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2694 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2695 UNSERIALIZE_SCALAR(txDesc32.extsts);
2696 UNSERIALIZE_SCALAR(extstsEnable);
2697
2698 /*
2699 * unserialize tx state machine
2700 */
2701 int txState;
2702 UNSERIALIZE_SCALAR(txState);
2703 this->txState = (TxState) txState;
2704 UNSERIALIZE_SCALAR(txEnable);
2705 UNSERIALIZE_SCALAR(CTDD);
2706 UNSERIALIZE_SCALAR(txFragPtr);
2707 UNSERIALIZE_SCALAR(txDescCnt);
2708 int txDmaState;
2709 UNSERIALIZE_SCALAR(txDmaState);
2710 this->txDmaState = (DmaState) txDmaState;
2711 UNSERIALIZE_SCALAR(txKickTick);
2712 if (txKickTick)
2713 txKickEvent.schedule(txKickTick);
2714
2715 /*
2716 * unserialize rx state machine
2717 */
2718 int rxState;
2719 UNSERIALIZE_SCALAR(rxState);
2720 this->rxState = (RxState) rxState;
2721 UNSERIALIZE_SCALAR(rxEnable);
2722 UNSERIALIZE_SCALAR(CRDD);
2723 UNSERIALIZE_SCALAR(rxPktBytes);
2724 UNSERIALIZE_SCALAR(rxFragPtr);
2725 UNSERIALIZE_SCALAR(rxDescCnt);
2726 int rxDmaState;
2727 UNSERIALIZE_SCALAR(rxDmaState);
2728 this->rxDmaState = (DmaState) rxDmaState;
2729 UNSERIALIZE_SCALAR(rxKickTick);
2730 if (rxKickTick)
2731 rxKickEvent.schedule(rxKickTick);
2732
2733 /*
2734 * Unserialize EEPROM state machine
2735 */
2736 int eepromState;
2737 UNSERIALIZE_SCALAR(eepromState);
2738 this->eepromState = (EEPROMState) eepromState;
2739 UNSERIALIZE_SCALAR(eepromClk);
2740 UNSERIALIZE_SCALAR(eepromBitsToRx);
2741 UNSERIALIZE_SCALAR(eepromOpcode);
2742 UNSERIALIZE_SCALAR(eepromAddress);
2743 UNSERIALIZE_SCALAR(eepromData);
2744
2745 /*
2746 * If there's a pending transmit, reschedule it now
2747 */
2748 Tick transmitTick;
2749 UNSERIALIZE_SCALAR(transmitTick);
2750 if (transmitTick)
2751 txEvent.schedule(curTick + transmitTick);
2752
2753 /*
2754 * unserialize receive address filter settings
2755 */
2756 UNSERIALIZE_SCALAR(rxFilterEnable);
2757 UNSERIALIZE_SCALAR(acceptBroadcast);
2758 UNSERIALIZE_SCALAR(acceptMulticast);
2759 UNSERIALIZE_SCALAR(acceptUnicast);
2760 UNSERIALIZE_SCALAR(acceptPerfect);
2761 UNSERIALIZE_SCALAR(acceptArp);
2762 UNSERIALIZE_SCALAR(multicastHashEnable);
2763
2764 /*
2765 * Keep track of pending interrupt status.
2766 */
2767 UNSERIALIZE_SCALAR(intrTick);
2768 UNSERIALIZE_SCALAR(cpuPendingIntr);
2769 Tick intrEventTick;
2770 UNSERIALIZE_SCALAR(intrEventTick);
2771 if (intrEventTick) {
2772 intrEvent = new IntrEvent(this, intrEventTick, true);
2773 }
2774 }
2775
2776 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2777
2778 SimObjectParam<EtherInt *> peer;
2779 SimObjectParam<NSGigE *> device;
2780
2781 END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2782
2783 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2784
2785 INIT_PARAM_DFLT(peer, "peer interface", NULL),
2786 INIT_PARAM(device, "Ethernet device of this interface")
2787
2788 END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2789
2790 CREATE_SIM_OBJECT(NSGigEInt)
2791 {
2792 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2793
2794 EtherInt *p = (EtherInt *)peer;
2795 if (p) {
2796 dev_int->setPeer(p);
2797 p->setPeer(dev_int);
2798 }
2799
2800 return dev_int;
2801 }
2802
2803 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
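// Since setPeer() above is called in both directions, in practice only
// one endpoint of a link needs its 'peer' parameter set in the config;
// the other side picks up the back-pointer automatically.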
2804
2805
2806 BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2807
2808 SimObjectParam<System *> system;
2809 SimObjectParam<Platform *> platform;
2810 Param<Tick> min_backoff_delay;
2811 Param<Tick> max_backoff_delay;
2812 SimObjectParam<PciConfigData *> configdata;
2813 Param<uint32_t> pci_bus;
2814 Param<uint32_t> pci_dev;
2815 Param<uint32_t> pci_func;
2816 Param<Tick> pio_latency;
2817 Param<Tick> config_latency;
2818
2819 Param<Tick> clock;
2820 Param<bool> dma_desc_free;
2821 Param<bool> dma_data_free;
2822 Param<Tick> dma_read_delay;
2823 Param<Tick> dma_write_delay;
2824 Param<Tick> dma_read_factor;
2825 Param<Tick> dma_write_factor;
2826 Param<bool> dma_no_allocate;
2827 Param<Tick> intr_delay;
2828
2829 Param<Tick> rx_delay;
2830 Param<Tick> tx_delay;
2831 Param<uint32_t> rx_fifo_size;
2832 Param<uint32_t> tx_fifo_size;
2833
2834 Param<bool> rx_filter;
2835 Param<string> hardware_address;
2836 Param<bool> rx_thread;
2837 Param<bool> tx_thread;
2838 Param<bool> rss;
2839
2840 END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2841
2842 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
2843
2844 INIT_PARAM(system, "System pointer"),
2845 INIT_PARAM(platform, "Platform pointer"),
2846 INIT_PARAM(min_backoff_delay, "Minimum delay after receiving a nack packet"),
2847 INIT_PARAM(max_backoff_delay, "Maximum delay after receiving a nack packet"),
2848 INIT_PARAM(configdata, "PCI Config data"),
2849 INIT_PARAM(pci_bus, "PCI bus ID"),
2850 INIT_PARAM(pci_dev, "PCI device number"),
2851 INIT_PARAM(pci_func, "PCI function code"),
2852 INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
2853 INIT_PARAM(config_latency, "Number of cycles for a config read or write"),
2854 INIT_PARAM(clock, "State machine cycle time"),
2855
2856 INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
2857 INIT_PARAM(dma_data_free, "DMA of Data is free"),
2858 INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
2859 INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
2860 INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
2861 INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
2862 INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
2863 INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
2864
2865 INIT_PARAM(rx_delay, "Receive Delay"),
2866 INIT_PARAM(tx_delay, "Transmit Delay"),
2867 INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
2868 INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),
2869
2870 INIT_PARAM(rx_filter, "Enable Receive Filter"),
2871 INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
2872 INIT_PARAM(rx_thread, ""),
2873 INIT_PARAM(tx_thread, ""),
2874 INIT_PARAM(rss, "")
2875
2876 END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2877
2878
2879 CREATE_SIM_OBJECT(NSGigE)
2880 {
2881 NSGigE::Params *params = new NSGigE::Params;
2882
2883 params->name = getInstanceName();
2884 params->platform = platform;
2885 params->system = system;
2886 params->min_backoff_delay = min_backoff_delay;
2887 params->max_backoff_delay = max_backoff_delay;
2888 params->configData = configdata;
2889 params->busNum = pci_bus;
2890 params->deviceNum = pci_dev;
2891 params->functionNum = pci_func;
2892 params->pio_delay = pio_latency;
2893 params->config_delay = config_latency;
2894
2895 params->clock = clock;
2896 params->dma_desc_free = dma_desc_free;
2897 params->dma_data_free = dma_data_free;
2898 params->dma_read_delay = dma_read_delay;
2899 params->dma_write_delay = dma_write_delay;
2900 params->dma_read_factor = dma_read_factor;
2901 params->dma_write_factor = dma_write_factor;
2902 params->dma_no_allocate = dma_no_allocate;
2904 params->intr_delay = intr_delay;
2905
2906 params->rx_delay = rx_delay;
2907 params->tx_delay = tx_delay;
2908 params->rx_fifo_size = rx_fifo_size;
2909 params->tx_fifo_size = tx_fifo_size;
2910
2911 params->rx_filter = rx_filter;
2912 params->eaddr = hardware_address;
2913 params->rx_thread = rx_thread;
2914 params->tx_thread = tx_thread;
2915 params->rss = rss;
2916
2917 return new NSGigE(params);
2918 }
2919
2920 REGISTER_SIM_OBJECT("NSGigE", NSGigE)