Merge zizzer.eecs.umich.edu:/bk/newmem/
[gem5.git] / src / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Nathan Binkert
29 * Lisa Hsu
30 */
31
32 /** @file
33 * Device module for modelling the National Semiconductor
34 * DP83820 ethernet controller. Does not support priority queueing
35 */
36 #include <deque>
37 #include <string>
38
39 #include "base/inet.hh"
40 #include "cpu/thread_context.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/packet.hh"
45 #include "mem/packet_access.hh"
46 #include "sim/builder.hh"
47 #include "sim/debug.hh"
48 #include "sim/host.hh"
49 #include "sim/stats.hh"
50 #include "sim/system.hh"
51
// Human-readable names for the receive state machine states, indexed
// by the rx state enum; used only for trace/DPRINTF output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Human-readable names for the transmit state machine states, indexed
// by the tx state enum; used only for trace/DPRINTF output.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Human-readable names for the DMA engine states, indexed by the dma
// state enum; used only for trace/DPRINTF output.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};

using namespace std;
using namespace Net;
using namespace TheISA;
86
87 ///////////////////////////////////////////////////////////////////////
88 //
89 // NSGigE PCI Device
90 //
/**
 * Construct the device model from its simulation parameters: size the
 * tx/rx FIFOs, latch the various delay/throughput knobs, reset the
 * register file, seed the perfect-match ROM with the configured MAC
 * address, and zero the cached descriptor images.
 * NOTE: the initializer-list order must match the member declaration
 * order in ns_gige.hh.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{

    // interrupt and DMA timing parameters
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // put the register file into its power-on state
    regsReset();
    // the perfect-match filter ROM holds the device's MAC address
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);

    // clear the cached 32- and 64-bit descriptor images
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
126
// Destructor: no explicit cleanup; members release their own resources.
NSGigE::~NSGigE()
{}
129
130 void
131 NSGigE::regStats()
132 {
133 txBytes
134 .name(name() + ".txBytes")
135 .desc("Bytes Transmitted")
136 .prereq(txBytes)
137 ;
138
139 rxBytes
140 .name(name() + ".rxBytes")
141 .desc("Bytes Received")
142 .prereq(rxBytes)
143 ;
144
145 txPackets
146 .name(name() + ".txPackets")
147 .desc("Number of Packets Transmitted")
148 .prereq(txBytes)
149 ;
150
151 rxPackets
152 .name(name() + ".rxPackets")
153 .desc("Number of Packets Received")
154 .prereq(rxBytes)
155 ;
156
157 txIpChecksums
158 .name(name() + ".txIpChecksums")
159 .desc("Number of tx IP Checksums done by device")
160 .precision(0)
161 .prereq(txBytes)
162 ;
163
164 rxIpChecksums
165 .name(name() + ".rxIpChecksums")
166 .desc("Number of rx IP Checksums done by device")
167 .precision(0)
168 .prereq(rxBytes)
169 ;
170
171 txTcpChecksums
172 .name(name() + ".txTcpChecksums")
173 .desc("Number of tx TCP Checksums done by device")
174 .precision(0)
175 .prereq(txBytes)
176 ;
177
178 rxTcpChecksums
179 .name(name() + ".rxTcpChecksums")
180 .desc("Number of rx TCP Checksums done by device")
181 .precision(0)
182 .prereq(rxBytes)
183 ;
184
185 txUdpChecksums
186 .name(name() + ".txUdpChecksums")
187 .desc("Number of tx UDP Checksums done by device")
188 .precision(0)
189 .prereq(txBytes)
190 ;
191
192 rxUdpChecksums
193 .name(name() + ".rxUdpChecksums")
194 .desc("Number of rx UDP Checksums done by device")
195 .precision(0)
196 .prereq(rxBytes)
197 ;
198
199 descDmaReads
200 .name(name() + ".descDMAReads")
201 .desc("Number of descriptors the device read w/ DMA")
202 .precision(0)
203 ;
204
205 descDmaWrites
206 .name(name() + ".descDMAWrites")
207 .desc("Number of descriptors the device wrote w/ DMA")
208 .precision(0)
209 ;
210
211 descDmaRdBytes
212 .name(name() + ".descDmaReadBytes")
213 .desc("number of descriptor bytes read w/ DMA")
214 .precision(0)
215 ;
216
217 descDmaWrBytes
218 .name(name() + ".descDmaWriteBytes")
219 .desc("number of descriptor bytes write w/ DMA")
220 .precision(0)
221 ;
222
223 txBandwidth
224 .name(name() + ".txBandwidth")
225 .desc("Transmit Bandwidth (bits/s)")
226 .precision(0)
227 .prereq(txBytes)
228 ;
229
230 rxBandwidth
231 .name(name() + ".rxBandwidth")
232 .desc("Receive Bandwidth (bits/s)")
233 .precision(0)
234 .prereq(rxBytes)
235 ;
236
237 totBandwidth
238 .name(name() + ".totBandwidth")
239 .desc("Total Bandwidth (bits/s)")
240 .precision(0)
241 .prereq(totBytes)
242 ;
243
244 totPackets
245 .name(name() + ".totPackets")
246 .desc("Total Packets")
247 .precision(0)
248 .prereq(totBytes)
249 ;
250
251 totBytes
252 .name(name() + ".totBytes")
253 .desc("Total Bytes")
254 .precision(0)
255 .prereq(totBytes)
256 ;
257
258 totPacketRate
259 .name(name() + ".totPPS")
260 .desc("Total Tranmission Rate (packets/s)")
261 .precision(0)
262 .prereq(totBytes)
263 ;
264
265 txPacketRate
266 .name(name() + ".txPPS")
267 .desc("Packet Tranmission Rate (packets/s)")
268 .precision(0)
269 .prereq(txBytes)
270 ;
271
272 rxPacketRate
273 .name(name() + ".rxPPS")
274 .desc("Packet Reception Rate (packets/s)")
275 .precision(0)
276 .prereq(rxBytes)
277 ;
278
279 postedSwi
280 .name(name() + ".postedSwi")
281 .desc("number of software interrupts posted to CPU")
282 .precision(0)
283 ;
284
285 totalSwi
286 .name(name() + ".totalSwi")
287 .desc("total number of Swi written to ISR")
288 .precision(0)
289 ;
290
291 coalescedSwi
292 .name(name() + ".coalescedSwi")
293 .desc("average number of Swi's coalesced into each post")
294 .precision(0)
295 ;
296
297 postedRxIdle
298 .name(name() + ".postedRxIdle")
299 .desc("number of rxIdle interrupts posted to CPU")
300 .precision(0)
301 ;
302
303 totalRxIdle
304 .name(name() + ".totalRxIdle")
305 .desc("total number of RxIdle written to ISR")
306 .precision(0)
307 ;
308
309 coalescedRxIdle
310 .name(name() + ".coalescedRxIdle")
311 .desc("average number of RxIdle's coalesced into each post")
312 .precision(0)
313 ;
314
315 postedRxOk
316 .name(name() + ".postedRxOk")
317 .desc("number of RxOk interrupts posted to CPU")
318 .precision(0)
319 ;
320
321 totalRxOk
322 .name(name() + ".totalRxOk")
323 .desc("total number of RxOk written to ISR")
324 .precision(0)
325 ;
326
327 coalescedRxOk
328 .name(name() + ".coalescedRxOk")
329 .desc("average number of RxOk's coalesced into each post")
330 .precision(0)
331 ;
332
333 postedRxDesc
334 .name(name() + ".postedRxDesc")
335 .desc("number of RxDesc interrupts posted to CPU")
336 .precision(0)
337 ;
338
339 totalRxDesc
340 .name(name() + ".totalRxDesc")
341 .desc("total number of RxDesc written to ISR")
342 .precision(0)
343 ;
344
345 coalescedRxDesc
346 .name(name() + ".coalescedRxDesc")
347 .desc("average number of RxDesc's coalesced into each post")
348 .precision(0)
349 ;
350
351 postedTxOk
352 .name(name() + ".postedTxOk")
353 .desc("number of TxOk interrupts posted to CPU")
354 .precision(0)
355 ;
356
357 totalTxOk
358 .name(name() + ".totalTxOk")
359 .desc("total number of TxOk written to ISR")
360 .precision(0)
361 ;
362
363 coalescedTxOk
364 .name(name() + ".coalescedTxOk")
365 .desc("average number of TxOk's coalesced into each post")
366 .precision(0)
367 ;
368
369 postedTxIdle
370 .name(name() + ".postedTxIdle")
371 .desc("number of TxIdle interrupts posted to CPU")
372 .precision(0)
373 ;
374
375 totalTxIdle
376 .name(name() + ".totalTxIdle")
377 .desc("total number of TxIdle written to ISR")
378 .precision(0)
379 ;
380
381 coalescedTxIdle
382 .name(name() + ".coalescedTxIdle")
383 .desc("average number of TxIdle's coalesced into each post")
384 .precision(0)
385 ;
386
387 postedTxDesc
388 .name(name() + ".postedTxDesc")
389 .desc("number of TxDesc interrupts posted to CPU")
390 .precision(0)
391 ;
392
393 totalTxDesc
394 .name(name() + ".totalTxDesc")
395 .desc("total number of TxDesc written to ISR")
396 .precision(0)
397 ;
398
399 coalescedTxDesc
400 .name(name() + ".coalescedTxDesc")
401 .desc("average number of TxDesc's coalesced into each post")
402 .precision(0)
403 ;
404
405 postedRxOrn
406 .name(name() + ".postedRxOrn")
407 .desc("number of RxOrn posted to CPU")
408 .precision(0)
409 ;
410
411 totalRxOrn
412 .name(name() + ".totalRxOrn")
413 .desc("total number of RxOrn written to ISR")
414 .precision(0)
415 ;
416
417 coalescedRxOrn
418 .name(name() + ".coalescedRxOrn")
419 .desc("average number of RxOrn's coalesced into each post")
420 .precision(0)
421 ;
422
423 coalescedTotal
424 .name(name() + ".coalescedTotal")
425 .desc("average number of interrupts coalesced into each post")
426 .precision(0)
427 ;
428
429 postedInterrupts
430 .name(name() + ".postedInterrupts")
431 .desc("number of posts to CPU")
432 .precision(0)
433 ;
434
435 droppedPackets
436 .name(name() + ".droppedPackets")
437 .desc("number of packets dropped")
438 .precision(0)
439 ;
440
441 coalescedSwi = totalSwi / postedInterrupts;
442 coalescedRxIdle = totalRxIdle / postedInterrupts;
443 coalescedRxOk = totalRxOk / postedInterrupts;
444 coalescedRxDesc = totalRxDesc / postedInterrupts;
445 coalescedTxOk = totalTxOk / postedInterrupts;
446 coalescedTxIdle = totalTxIdle / postedInterrupts;
447 coalescedTxDesc = totalTxDesc / postedInterrupts;
448 coalescedRxOrn = totalRxOrn / postedInterrupts;
449
450 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
451 totalTxOk + totalTxIdle + totalTxDesc +
452 totalRxOrn) / postedInterrupts;
453
454 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
455 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
456 totBandwidth = txBandwidth + rxBandwidth;
457 totBytes = txBytes + rxBytes;
458 totPackets = txPackets + rxPackets;
459
460 txPacketRate = txPackets / simSeconds;
461 rxPacketRate = rxPackets / simSeconds;
462 }
463
464
465 /**
466 * This is to write to the PCI general configuration registers
467 */
468 Tick
469 NSGigE::writeConfig(PacketPtr pkt)
470 {
471 int offset = pkt->getAddr() & PCI_CONFIG_SIZE;
472 if (offset < PCI_DEVICE_SPECIFIC)
473 PciDev::writeConfig(pkt);
474 else
475 panic("Device specific PCI config space not implemented!\n");
476
477 switch (offset) {
478 // seems to work fine without all these PCI settings, but i
479 // put in the IO to double check, an assertion will fail if we
480 // need to properly implement it
481 case PCI_COMMAND:
482 if (config.data[offset] & PCI_CMD_IOSE)
483 ioEnable = true;
484 else
485 ioEnable = false;
486 break;
487 }
488 pkt->result = Packet::Success;
489 return configDelay;
490 }
491
/**
 * Handle a PIO read of the device registers, which are detailed in the
 * NS83820 spec sheet.  Decodes the 4KB-aligned offset, handles the
 * reserved/config/MIB ranges specially, and otherwise returns the
 * current value of the addressed 32-bit register.  Some registers
 * (CR, ISR, MIBC) have read side effects.
 */
Tick
NSGigE::read(PacketPtr pkt)
{
    // PIO is only legal once the PCI command register enabled I/O space
    assert(ioEnable);

    pkt->allocate();

    //The mask is to give you only the offset into the device register file
    Addr daddr = pkt->getAddr() & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x size=%d\n",
            daddr, pkt->getAddr(), pkt->getSize());


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        return readConfig(pkt);
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        pkt->set<uint32_t>(0);
        pkt->result = Packet::Success;
        return pioDelay;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    // all remaining registers are 32 bits wide; write the result
    // directly into the packet's data buffer
    assert(pkt->getSize() == sizeof(uint32_t));
    uint32_t &reg = *pkt->getPtr<uint32_t>();
    uint16_t rfaddr;

    switch (daddr) {
      case CR:
        reg = regs.command;
        //these are supposed to be cleared on a read
        reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
        break;

      case CFGR:
        reg = regs.config;
        break;

      case MEAR:
        reg = regs.mear;
        break;

      case PTSCR:
        reg = regs.ptscr;
        break;

      case ISR:
        // reading the ISR acknowledges (clears) all interrupts
        reg = regs.isr;
        devIntrClear(ISR_ALL);
        break;

      case IMR:
        reg = regs.imr;
        break;

      case IER:
        reg = regs.ier;
        break;

      case IHR:
        reg = regs.ihr;
        break;

      case TXDP:
        reg = regs.txdp;
        break;

      case TXDP_HI:
        reg = regs.txdp_hi;
        break;

      case TX_CFG:
        reg = regs.txcfg;
        break;

      case GPIOR:
        reg = regs.gpior;
        break;

      case RXDP:
        reg = regs.rxdp;
        break;

      case RXDP_HI:
        reg = regs.rxdp_hi;
        break;

      case RX_CFG:
        reg = regs.rxcfg;
        break;

      case PQCR:
        reg = regs.pqcr;
        break;

      case WCSR:
        reg = regs.wcsr;
        break;

      case PCR:
        reg = regs.pcr;
        break;

      // see the spec sheet for how RFCR and RFDR work
      // basically, you write to RFCR to tell the machine
      // what you want to do next, then you act upon RFDR,
      // and the device will be prepared b/c of what you
      // wrote to RFCR
      case RFDR:
        // the address previously written to RFCR selects which 16-bit
        // word of the filter ROM/hash table this read returns
        rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
        switch (rfaddr) {
          // Read from perfect match ROM octets (little-endian pairs)
          case 0x000:
            reg = rom.perfectMatch[1];
            reg = reg << 8;
            reg += rom.perfectMatch[0];
            break;
          case 0x002:
            reg = rom.perfectMatch[3] << 8;
            reg += rom.perfectMatch[2];
            break;
          case 0x004:
            reg = rom.perfectMatch[5] << 8;
            reg += rom.perfectMatch[4];
            break;
          default:
            // Read filter hash table
            if (rfaddr >= FHASH_ADDR &&
                rfaddr < FHASH_ADDR + FHASH_SIZE) {

                // Only word-aligned reads supported
                if (rfaddr % 2)
                    panic("unaligned read from filter hash table!");

                reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                reg += rom.filterHash[rfaddr - FHASH_ADDR];
                break;
            }

            panic("reading RFDR for something other than pattern"
                  " matching or hashing! %#x\n", rfaddr);
        }
        break;

      case RFCR:
        reg = regs.rfcr;
        break;

      case SRR:
        reg = regs.srr;
        break;

      case MIBC:
        // the strobe and clear bits read back as zero
        reg = regs.mibc;
        reg &= ~(MIBC_MIBS | MIBC_ACLR);
        break;

      case VRCR:
        reg = regs.vrcr;
        break;

      case VTCR:
        reg = regs.vtcr;
        break;

      case VDR:
        reg = regs.vdr;
        break;

      case CCSR:
        reg = regs.ccsr;
        break;

      case TBICR:
        reg = regs.tbicr;
        break;

      case TBISR:
        reg = regs.tbisr;
        break;

      case TANAR:
        reg = regs.tanar;
        break;

      case TANLPAR:
        reg = regs.tanlpar;
        break;

      case TANER:
        reg = regs.taner;
        break;

      case TESR:
        reg = regs.tesr;
        break;

      case M5REG:
        // simulator-only register exposing configuration flags to the
        // guest driver
        reg = 0;
        if (params()->rx_thread)
            reg |= M5REG_RX_THREAD;
        if (params()->tx_thread)
            reg |= M5REG_TX_THREAD;
        if (params()->rss)
            reg |= M5REG_RSS;
        break;

      default:
        panic("reading unimplemented register: addr=%#x", daddr);
    }

    DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
            daddr, reg, reg);

    pkt->result = Packet::Success;
    return pioDelay;
}
719
720 Tick
721 NSGigE::write(PacketPtr pkt)
722 {
723 assert(ioEnable);
724
725 Addr daddr = pkt->getAddr() & 0xfff;
726 DPRINTF(EthernetPIO, "write da=%#x pa=%#x size=%d\n",
727 daddr, pkt->getAddr(), pkt->getSize());
728
729 if (daddr > LAST && daddr <= RESERVED) {
730 panic("Accessing reserved register");
731 } else if (daddr > RESERVED && daddr <= 0x3FC) {
732 return writeConfig(pkt);
733 } else if (daddr > 0x3FC)
734 panic("Something is messed up!\n");
735
736 if (pkt->getSize() == sizeof(uint32_t)) {
737 uint32_t reg = pkt->get<uint32_t>();
738 uint16_t rfaddr;
739
740 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
741
742 switch (daddr) {
743 case CR:
744 regs.command = reg;
745 if (reg & CR_TXD) {
746 txEnable = false;
747 } else if (reg & CR_TXE) {
748 txEnable = true;
749
750 // the kernel is enabling the transmit machine
751 if (txState == txIdle)
752 txKick();
753 }
754
755 if (reg & CR_RXD) {
756 rxEnable = false;
757 } else if (reg & CR_RXE) {
758 rxEnable = true;
759
760 if (rxState == rxIdle)
761 rxKick();
762 }
763
764 if (reg & CR_TXR)
765 txReset();
766
767 if (reg & CR_RXR)
768 rxReset();
769
770 if (reg & CR_SWI)
771 devIntrPost(ISR_SWI);
772
773 if (reg & CR_RST) {
774 txReset();
775 rxReset();
776
777 regsReset();
778 }
779 break;
780
781 case CFGR:
782 if (reg & CFGR_LNKSTS ||
783 reg & CFGR_SPDSTS ||
784 reg & CFGR_DUPSTS ||
785 reg & CFGR_RESERVED ||
786 reg & CFGR_T64ADDR ||
787 reg & CFGR_PCI64_DET)
788
789 // First clear all writable bits
790 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
791 CFGR_RESERVED | CFGR_T64ADDR |
792 CFGR_PCI64_DET;
793 // Now set the appropriate writable bits
794 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
795 CFGR_RESERVED | CFGR_T64ADDR |
796 CFGR_PCI64_DET);
797
798 // all these #if 0's are because i don't THINK the kernel needs to
799 // have these implemented. if there is a problem relating to one of
800 // these, you may need to add functionality in.
801 if (reg & CFGR_TBI_EN) ;
802 if (reg & CFGR_MODE_1000) ;
803
804 if (reg & CFGR_AUTO_1000)
805 panic("CFGR_AUTO_1000 not implemented!\n");
806
807 if (reg & CFGR_PINT_DUPSTS ||
808 reg & CFGR_PINT_LNKSTS ||
809 reg & CFGR_PINT_SPDSTS)
810 ;
811
812 if (reg & CFGR_TMRTEST) ;
813 if (reg & CFGR_MRM_DIS) ;
814 if (reg & CFGR_MWI_DIS) ;
815
816 if (reg & CFGR_T64ADDR) ;
817 // panic("CFGR_T64ADDR is read only register!\n");
818
819 if (reg & CFGR_PCI64_DET)
820 panic("CFGR_PCI64_DET is read only register!\n");
821
822 if (reg & CFGR_DATA64_EN) ;
823 if (reg & CFGR_M64ADDR) ;
824 if (reg & CFGR_PHY_RST) ;
825 if (reg & CFGR_PHY_DIS) ;
826
827 if (reg & CFGR_EXTSTS_EN)
828 extstsEnable = true;
829 else
830 extstsEnable = false;
831
832 if (reg & CFGR_REQALG) ;
833 if (reg & CFGR_SB) ;
834 if (reg & CFGR_POW) ;
835 if (reg & CFGR_EXD) ;
836 if (reg & CFGR_PESEL) ;
837 if (reg & CFGR_BROM_DIS) ;
838 if (reg & CFGR_EXT_125) ;
839 if (reg & CFGR_BEM) ;
840 break;
841
842 case MEAR:
843 // Clear writable bits
844 regs.mear &= MEAR_EEDO;
845 // Set appropriate writable bits
846 regs.mear |= reg & ~MEAR_EEDO;
847
848 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
849 // even though it could get it through RFDR
850 if (reg & MEAR_EESEL) {
851 // Rising edge of clock
852 if (reg & MEAR_EECLK && !eepromClk)
853 eepromKick();
854 }
855 else {
856 eepromState = eepromStart;
857 regs.mear &= ~MEAR_EEDI;
858 }
859
860 eepromClk = reg & MEAR_EECLK;
861
862 // since phy is completely faked, MEAR_MD* don't matter
863 if (reg & MEAR_MDIO) ;
864 if (reg & MEAR_MDDIR) ;
865 if (reg & MEAR_MDC) ;
866 break;
867
868 case PTSCR:
869 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
870 // these control BISTs for various parts of chip - we
871 // don't care or do just fake that the BIST is done
872 if (reg & PTSCR_RBIST_EN)
873 regs.ptscr |= PTSCR_RBIST_DONE;
874 if (reg & PTSCR_EEBIST_EN)
875 regs.ptscr &= ~PTSCR_EEBIST_EN;
876 if (reg & PTSCR_EELOAD_EN)
877 regs.ptscr &= ~PTSCR_EELOAD_EN;
878 break;
879
880 case ISR: /* writing to the ISR has no effect */
881 panic("ISR is a read only register!\n");
882
883 case IMR:
884 regs.imr = reg;
885 devIntrChangeMask();
886 break;
887
888 case IER:
889 regs.ier = reg;
890 break;
891
892 case IHR:
893 regs.ihr = reg;
894 /* not going to implement real interrupt holdoff */
895 break;
896
897 case TXDP:
898 regs.txdp = (reg & 0xFFFFFFFC);
899 assert(txState == txIdle);
900 CTDD = false;
901 break;
902
903 case TXDP_HI:
904 regs.txdp_hi = reg;
905 break;
906
907 case TX_CFG:
908 regs.txcfg = reg;
909 #if 0
910 if (reg & TX_CFG_CSI) ;
911 if (reg & TX_CFG_HBI) ;
912 if (reg & TX_CFG_MLB) ;
913 if (reg & TX_CFG_ATP) ;
914 if (reg & TX_CFG_ECRETRY) {
915 /*
916 * this could easily be implemented, but considering
917 * the network is just a fake pipe, wouldn't make
918 * sense to do this
919 */
920 }
921
922 if (reg & TX_CFG_BRST_DIS) ;
923 #endif
924
925 #if 0
926 /* we handle our own DMA, ignore the kernel's exhortations */
927 if (reg & TX_CFG_MXDMA) ;
928 #endif
929
930 // also, we currently don't care about fill/drain
931 // thresholds though this may change in the future with
932 // more realistic networks or a driver which changes it
933 // according to feedback
934
935 break;
936
937 case GPIOR:
938 // Only write writable bits
939 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
940 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
941 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
942 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
943 /* these just control general purpose i/o pins, don't matter */
944 break;
945
946 case RXDP:
947 regs.rxdp = reg;
948 CRDD = false;
949 break;
950
951 case RXDP_HI:
952 regs.rxdp_hi = reg;
953 break;
954
955 case RX_CFG:
956 regs.rxcfg = reg;
957 #if 0
958 if (reg & RX_CFG_AEP) ;
959 if (reg & RX_CFG_ARP) ;
960 if (reg & RX_CFG_STRIPCRC) ;
961 if (reg & RX_CFG_RX_RD) ;
962 if (reg & RX_CFG_ALP) ;
963 if (reg & RX_CFG_AIRL) ;
964
965 /* we handle our own DMA, ignore what kernel says about it */
966 if (reg & RX_CFG_MXDMA) ;
967
968 //also, we currently don't care about fill/drain thresholds
969 //though this may change in the future with more realistic
970 //networks or a driver which changes it according to feedback
971 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
972 #endif
973 break;
974
975 case PQCR:
976 /* there is no priority queueing used in the linux 2.6 driver */
977 regs.pqcr = reg;
978 break;
979
980 case WCSR:
981 /* not going to implement wake on LAN */
982 regs.wcsr = reg;
983 break;
984
985 case PCR:
986 /* not going to implement pause control */
987 regs.pcr = reg;
988 break;
989
990 case RFCR:
991 regs.rfcr = reg;
992
993 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
994 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
995 acceptMulticast = (reg & RFCR_AAM) ? true : false;
996 acceptUnicast = (reg & RFCR_AAU) ? true : false;
997 acceptPerfect = (reg & RFCR_APM) ? true : false;
998 acceptArp = (reg & RFCR_AARP) ? true : false;
999 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1000
1001 #if 0
1002 if (reg & RFCR_APAT)
1003 panic("RFCR_APAT not implemented!\n");
1004 #endif
1005 if (reg & RFCR_UHEN)
1006 panic("Unicast hash filtering not used by drivers!\n");
1007
1008 if (reg & RFCR_ULM)
1009 panic("RFCR_ULM not implemented!\n");
1010
1011 break;
1012
1013 case RFDR:
1014 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1015 switch (rfaddr) {
1016 case 0x000:
1017 rom.perfectMatch[0] = (uint8_t)reg;
1018 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1019 break;
1020 case 0x002:
1021 rom.perfectMatch[2] = (uint8_t)reg;
1022 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1023 break;
1024 case 0x004:
1025 rom.perfectMatch[4] = (uint8_t)reg;
1026 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1027 break;
1028 default:
1029
1030 if (rfaddr >= FHASH_ADDR &&
1031 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1032
1033 // Only word-aligned writes supported
1034 if (rfaddr % 2)
1035 panic("unaligned write to filter hash table!");
1036
1037 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1038 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1039 = (uint8_t)(reg >> 8);
1040 break;
1041 }
1042 panic("writing RFDR for something other than pattern matching\
1043 or hashing! %#x\n", rfaddr);
1044 }
1045
1046 case BRAR:
1047 regs.brar = reg;
1048 break;
1049
1050 case BRDR:
1051 panic("the driver never uses BRDR, something is wrong!\n");
1052
1053 case SRR:
1054 panic("SRR is read only register!\n");
1055
1056 case MIBC:
1057 panic("the driver never uses MIBC, something is wrong!\n");
1058
1059 case VRCR:
1060 regs.vrcr = reg;
1061 break;
1062
1063 case VTCR:
1064 regs.vtcr = reg;
1065 break;
1066
1067 case VDR:
1068 panic("the driver never uses VDR, something is wrong!\n");
1069
1070 case CCSR:
1071 /* not going to implement clockrun stuff */
1072 regs.ccsr = reg;
1073 break;
1074
1075 case TBICR:
1076 regs.tbicr = reg;
1077 if (reg & TBICR_MR_LOOPBACK)
1078 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1079
1080 if (reg & TBICR_MR_AN_ENABLE) {
1081 regs.tanlpar = regs.tanar;
1082 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1083 }
1084
1085 #if 0
1086 if (reg & TBICR_MR_RESTART_AN) ;
1087 #endif
1088
1089 break;
1090
1091 case TBISR:
1092 panic("TBISR is read only register!\n");
1093
1094 case TANAR:
1095 // Only write the writable bits
1096 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1097 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1098
1099 // Pause capability unimplemented
1100 #if 0
1101 if (reg & TANAR_PS2) ;
1102 if (reg & TANAR_PS1) ;
1103 #endif
1104
1105 break;
1106
1107 case TANLPAR:
1108 panic("this should only be written to by the fake phy!\n");
1109
1110 case TANER:
1111 panic("TANER is read only register!\n");
1112
1113 case TESR:
1114 regs.tesr = reg;
1115 break;
1116
1117 default:
1118 panic("invalid register access daddr=%#x", daddr);
1119 }
1120 } else {
1121 panic("Invalid Request Size");
1122 }
1123 pkt->result = Packet::Success;
1124 return pioDelay;
1125 }
1126
1127 void
1128 NSGigE::devIntrPost(uint32_t interrupts)
1129 {
1130 if (interrupts & ISR_RESERVE)
1131 panic("Cannot set a reserved interrupt");
1132
1133 if (interrupts & ISR_NOIMPL)
1134 warn("interrupt not implemented %#x\n", interrupts);
1135
1136 interrupts &= ISR_IMPL;
1137 regs.isr |= interrupts;
1138
1139 if (interrupts & regs.imr) {
1140 if (interrupts & ISR_SWI) {
1141 totalSwi++;
1142 }
1143 if (interrupts & ISR_RXIDLE) {
1144 totalRxIdle++;
1145 }
1146 if (interrupts & ISR_RXOK) {
1147 totalRxOk++;
1148 }
1149 if (interrupts & ISR_RXDESC) {
1150 totalRxDesc++;
1151 }
1152 if (interrupts & ISR_TXOK) {
1153 totalTxOk++;
1154 }
1155 if (interrupts & ISR_TXIDLE) {
1156 totalTxIdle++;
1157 }
1158 if (interrupts & ISR_TXDESC) {
1159 totalTxDesc++;
1160 }
1161 if (interrupts & ISR_RXORN) {
1162 totalRxOrn++;
1163 }
1164 }
1165
1166 DPRINTF(EthernetIntr,
1167 "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
1168 interrupts, regs.isr, regs.imr);
1169
1170 if ((regs.isr & regs.imr)) {
1171 Tick when = curTick;
1172 if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
1173 when += intrDelay;
1174 cpuIntrPost(when);
1175 }
1176 }
1177
1178 /* writing this interrupt counting stats inside this means that this function
1179 is now limited to being used to clear all interrupts upon the kernel
1180 reading isr and servicing. just telling you in case you were thinking
1181 of expanding use.
1182 */
1183 void
1184 NSGigE::devIntrClear(uint32_t interrupts)
1185 {
1186 if (interrupts & ISR_RESERVE)
1187 panic("Cannot clear a reserved interrupt");
1188
1189 if (regs.isr & regs.imr & ISR_SWI) {
1190 postedSwi++;
1191 }
1192 if (regs.isr & regs.imr & ISR_RXIDLE) {
1193 postedRxIdle++;
1194 }
1195 if (regs.isr & regs.imr & ISR_RXOK) {
1196 postedRxOk++;
1197 }
1198 if (regs.isr & regs.imr & ISR_RXDESC) {
1199 postedRxDesc++;
1200 }
1201 if (regs.isr & regs.imr & ISR_TXOK) {
1202 postedTxOk++;
1203 }
1204 if (regs.isr & regs.imr & ISR_TXIDLE) {
1205 postedTxIdle++;
1206 }
1207 if (regs.isr & regs.imr & ISR_TXDESC) {
1208 postedTxDesc++;
1209 }
1210 if (regs.isr & regs.imr & ISR_RXORN) {
1211 postedRxOrn++;
1212 }
1213
1214 if (regs.isr & regs.imr & ISR_IMPL)
1215 postedInterrupts++;
1216
1217 interrupts &= ~ISR_NOIMPL;
1218 regs.isr &= ~interrupts;
1219
1220 DPRINTF(EthernetIntr,
1221 "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
1222 interrupts, regs.isr, regs.imr);
1223
1224 if (!(regs.isr & regs.imr))
1225 cpuIntrClear();
1226 }
1227
1228 void
1229 NSGigE::devIntrChangeMask()
1230 {
1231 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1232 regs.isr, regs.imr, regs.isr & regs.imr);
1233
1234 if (regs.isr & regs.imr)
1235 cpuIntrPost(curTick);
1236 else
1237 cpuIntrClear();
1238 }
1239
/**
 * Schedule delivery of the device interrupt to the CPU at tick
 * `when`.  If an earlier delivery is already scheduled, the new
 * request is subsumed by it; otherwise any pending event is squashed
 * and a new one scheduled at the (possibly earlier) time.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    // intrTick == 0 means "no interrupt currently scheduled"
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // defensive clamp: should be unreachable given the asserts above;
    // drop into the debugger if it ever happens
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any previously scheduled delivery with one at intrTick
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1274
1275 void
1276 NSGigE::cpuInterrupt()
1277 {
1278 assert(intrTick == curTick);
1279
1280 // Whether or not there's a pending interrupt, we don't care about
1281 // it anymore
1282 intrEvent = 0;
1283 intrTick = 0;
1284
1285 // Don't send an interrupt if there's already one
1286 if (cpuPendingIntr) {
1287 DPRINTF(EthernetIntr,
1288 "would send an interrupt now, but there's already pending\n");
1289 } else {
1290 // Send interrupt
1291 cpuPendingIntr = true;
1292
1293 DPRINTF(EthernetIntr, "posting interrupt\n");
1294 intrPost();
1295 }
1296 }
1297
1298 void
1299 NSGigE::cpuIntrClear()
1300 {
1301 if (!cpuPendingIntr)
1302 return;
1303
1304 if (intrEvent) {
1305 intrEvent->squash();
1306 intrEvent = 0;
1307 }
1308
1309 intrTick = 0;
1310
1311 cpuPendingIntr = false;
1312
1313 DPRINTF(EthernetIntr, "clearing interrupt\n");
1314 intrClear();
1315 }
1316
1317 bool
1318 NSGigE::cpuIntrPending() const
1319 { return cpuPendingIntr; }
1320
1321 void
1322 NSGigE::txReset()
1323 {
1324
1325 DPRINTF(Ethernet, "transmit reset\n");
1326
1327 CTDD = false;
1328 txEnable = false;;
1329 txFragPtr = 0;
1330 assert(txDescCnt == 0);
1331 txFifo.clear();
1332 txState = txIdle;
1333 assert(txDmaState == dmaIdle);
1334 }
1335
1336 void
1337 NSGigE::rxReset()
1338 {
1339 DPRINTF(Ethernet, "receive reset\n");
1340
1341 CRDD = false;
1342 assert(rxPktBytes == 0);
1343 rxEnable = false;
1344 rxFragPtr = 0;
1345 assert(rxDescCnt == 0);
1346 assert(rxDmaState == dmaIdle);
1347 rxFifo.clear();
1348 rxState = rxIdle;
1349 }
1350
1351 void
1352 NSGigE::regsReset()
1353 {
1354 memset(&regs, 0, sizeof(regs));
1355 regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
1356 regs.mear = 0x12;
1357 regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
1358 // fill threshold to 32 bytes
1359 regs.rxcfg = 0x4; // set drain threshold to 16 bytes
1360 regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
1361 regs.mibc = MIBC_FRZ;
1362 regs.vdr = 0x81; // set the vlan tag type to 802.1q
1363 regs.tesr = 0xc000; // TBI capable of both full and half duplex
1364 regs.brar = 0xffffffff;
1365
1366 extstsEnable = false;
1367 acceptBroadcast = false;
1368 acceptMulticast = false;
1369 acceptUnicast = false;
1370 acceptPerfect = false;
1371 acceptArp = false;
1372 }
1373
1374 bool
1375 NSGigE::doRxDmaRead()
1376 {
1377 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1378 rxDmaState = dmaReading;
1379
1380 if (dmaPending() || getState() != Running)
1381 rxDmaState = dmaReadWaiting;
1382 else
1383 dmaRead(rxDmaAddr, rxDmaLen, &rxDmaReadEvent, (uint8_t*)rxDmaData);
1384
1385 return true;
1386 }
1387
1388 void
1389 NSGigE::rxDmaReadDone()
1390 {
1391 assert(rxDmaState == dmaReading);
1392 rxDmaState = dmaIdle;
1393
1394 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1395 rxDmaAddr, rxDmaLen);
1396 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1397
1398 // If the transmit state machine has a pending DMA, let it go first
1399 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1400 txKick();
1401
1402 rxKick();
1403 }
1404
1405 bool
1406 NSGigE::doRxDmaWrite()
1407 {
1408 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1409 rxDmaState = dmaWriting;
1410
1411 if (dmaPending() || getState() != Running)
1412 rxDmaState = dmaWriteWaiting;
1413 else
1414 dmaWrite(rxDmaAddr, rxDmaLen, &rxDmaWriteEvent, (uint8_t*)rxDmaData);
1415 return true;
1416 }
1417
1418 void
1419 NSGigE::rxDmaWriteDone()
1420 {
1421 assert(rxDmaState == dmaWriting);
1422 rxDmaState = dmaIdle;
1423
1424 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1425 rxDmaAddr, rxDmaLen);
1426 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1427
1428 // If the transmit state machine has a pending DMA, let it go first
1429 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1430 txKick();
1431
1432 rxKick();
1433 }
1434
/**
 * Run the receive-side state machine.
 *
 * The machine loops via the "next" label until it either blocks on a
 * DMA transfer or runs out of work, at which point it jumps to "exit".
 * When a DMA completes, its done-handler re-enters this function and
 * the machine picks up where it blocked. Descriptors are cached in
 * rxDesc32/rxDesc64 depending on the addressing mode in CFGR.
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    // References alias the active cached descriptor's fields so the
    // code below works for both 32- and 64-bit descriptor formats.
    Addr link, bufptr;
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    // If the device is clocked, run at most one state transition per
    // device cycle.
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any rx DMA that was deferred because the engine was busy.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already consumed: just refresh
            // its link field to find the next descriptor.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor pointed to by RXDP.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        // OWN set means the descriptor still belongs to the driver
        // (nothing for the device to fill) -> go idle.
        if (cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Bytes remain: DMA the next fragment into the buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Packet fully copied: build up the cmdsts/extsts
            // writeback values and DMA them back to the descriptor.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;  //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Verify checksums and record the result in extsts when
            // extended-status reporting is enabled.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // cmdsts and extsts are adjacent in both descriptor
            // layouts, so both are written back with one DMA.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            // End of the descriptor ring; remember that the current
            // descriptor was consumed (CRDD) and go idle.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1771
/**
 * Try to send the packet at the head of the tx FIFO out the wire.
 * On success the packet is popped, stats are updated, and ISR_TXOK is
 * posted; if the interface refuses the packet (or more remain), a
 * retry is scheduled via txEvent.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        // Stats are read from the packet before it is popped.
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // Retry later if the send was refused or more packets are queued.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}
1822
1823 bool
1824 NSGigE::doTxDmaRead()
1825 {
1826 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1827 txDmaState = dmaReading;
1828
1829 if (dmaPending() || getState() != Running)
1830 txDmaState = dmaReadWaiting;
1831 else
1832 dmaRead(txDmaAddr, txDmaLen, &txDmaReadEvent, (uint8_t*)txDmaData);
1833
1834 return true;
1835 }
1836
1837 void
1838 NSGigE::txDmaReadDone()
1839 {
1840 assert(txDmaState == dmaReading);
1841 txDmaState = dmaIdle;
1842
1843 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1844 txDmaAddr, txDmaLen);
1845 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1846
1847 // If the receive state machine has a pending DMA, let it go first
1848 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1849 rxKick();
1850
1851 txKick();
1852 }
1853
1854 bool
1855 NSGigE::doTxDmaWrite()
1856 {
1857 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1858 txDmaState = dmaWriting;
1859
1860 if (dmaPending() || getState() != Running)
1861 txDmaState = dmaWriteWaiting;
1862 else
1863 dmaWrite(txDmaAddr, txDmaLen, &txDmaWriteEvent, (uint8_t*)txDmaData);
1864 return true;
1865 }
1866
1867 void
1868 NSGigE::txDmaWriteDone()
1869 {
1870 assert(txDmaState == dmaWriting);
1871 txDmaState = dmaIdle;
1872
1873 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1874 txDmaAddr, txDmaLen);
1875 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1876
1877 // If the receive state machine has a pending DMA, let it go first
1878 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1879 rxKick();
1880
1881 txKick();
1882 }
1883
1884 void
1885 NSGigE::txKick()
1886 {
1887 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
1888
1889 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
1890 NsTxStateStrings[txState], is64bit ? 64 : 32);
1891
1892 Addr link, bufptr;
1893 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
1894 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
1895
1896 next:
1897 if (clock) {
1898 if (txKickTick > curTick) {
1899 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
1900 txKickTick);
1901 goto exit;
1902 }
1903
1904 // Go to the next state machine clock tick.
1905 txKickTick = curTick + cycles(1);
1906 }
1907
1908 switch(txDmaState) {
1909 case dmaReadWaiting:
1910 if (doTxDmaRead())
1911 goto exit;
1912 break;
1913 case dmaWriteWaiting:
1914 if (doTxDmaWrite())
1915 goto exit;
1916 break;
1917 default:
1918 break;
1919 }
1920
1921 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
1922 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
1923 switch (txState) {
1924 case txIdle:
1925 if (!txEnable) {
1926 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
1927 goto exit;
1928 }
1929
1930 if (CTDD) {
1931 txState = txDescRefr;
1932
1933 txDmaAddr = regs.txdp & 0x3fffffff;
1934 txDmaData =
1935 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
1936 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
1937 txDmaFree = dmaDescFree;
1938
1939 descDmaReads++;
1940 descDmaRdBytes += txDmaLen;
1941
1942 if (doTxDmaRead())
1943 goto exit;
1944
1945 } else {
1946 txState = txDescRead;
1947
1948 txDmaAddr = regs.txdp & 0x3fffffff;
1949 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
1950 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
1951 txDmaFree = dmaDescFree;
1952
1953 descDmaReads++;
1954 descDmaRdBytes += txDmaLen;
1955
1956 if (doTxDmaRead())
1957 goto exit;
1958 }
1959 break;
1960
1961 case txDescRefr:
1962 if (txDmaState != dmaIdle)
1963 goto exit;
1964
1965 txState = txAdvance;
1966 break;
1967
1968 case txDescRead:
1969 if (txDmaState != dmaIdle)
1970 goto exit;
1971
1972 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
1973 regs.txdp & 0x3fffffff);
1974 DPRINTF(EthernetDesc,
1975 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
1976 link, bufptr, cmdsts, extsts);
1977
1978 if (cmdsts & CMDSTS_OWN) {
1979 txState = txFifoBlock;
1980 txFragPtr = bufptr;
1981 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
1982 } else {
1983 devIntrPost(ISR_TXIDLE);
1984 txState = txIdle;
1985 goto exit;
1986 }
1987 break;
1988
1989 case txFifoBlock:
1990 if (!txPacket) {
1991 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
1992 txPacket = new EthPacketData(16384);
1993 txPacketBufPtr = txPacket->data;
1994 }
1995
1996 if (txDescCnt == 0) {
1997 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
1998 if (cmdsts & CMDSTS_MORE) {
1999 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2000 txState = txDescWrite;
2001
2002 cmdsts &= ~CMDSTS_OWN;
2003
2004 txDmaAddr = regs.txdp & 0x3fffffff;
2005 txDmaData = &cmdsts;
2006 if (is64bit) {
2007 txDmaAddr += offsetof(ns_desc64, cmdsts);
2008 txDmaLen = sizeof(txDesc64.cmdsts);
2009 } else {
2010 txDmaAddr += offsetof(ns_desc32, cmdsts);
2011 txDmaLen = sizeof(txDesc32.cmdsts);
2012 }
2013 txDmaFree = dmaDescFree;
2014
2015 if (doTxDmaWrite())
2016 goto exit;
2017
2018 } else { /* this packet is totally done */
2019 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2020 /* deal with the the packet that just finished */
2021 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2022 IpPtr ip(txPacket);
2023 if (extsts & EXTSTS_UDPPKT) {
2024 UdpPtr udp(ip);
2025 udp->sum(0);
2026 udp->sum(cksum(udp));
2027 txUdpChecksums++;
2028 } else if (extsts & EXTSTS_TCPPKT) {
2029 TcpPtr tcp(ip);
2030 tcp->sum(0);
2031 tcp->sum(cksum(tcp));
2032 txTcpChecksums++;
2033 }
2034 if (extsts & EXTSTS_IPPKT) {
2035 ip->sum(0);
2036 ip->sum(cksum(ip));
2037 txIpChecksums++;
2038 }
2039 }
2040
2041 txPacket->length = txPacketBufPtr - txPacket->data;
2042 // this is just because the receive can't handle a
2043 // packet bigger want to make sure
2044 if (txPacket->length > 1514)
2045 panic("transmit packet too large, %s > 1514\n",
2046 txPacket->length);
2047
2048 #ifndef NDEBUG
2049 bool success =
2050 #endif
2051 txFifo.push(txPacket);
2052 assert(success);
2053
2054 /*
2055 * this following section is not tqo spec, but
2056 * functionally shouldn't be any different. normally,
2057 * the chip will wait til the transmit has occurred
2058 * before writing back the descriptor because it has
2059 * to wait to see that it was successfully transmitted
2060 * to decide whether to set CMDSTS_OK or not.
2061 * however, in the simulator since it is always
2062 * successfully transmitted, and writing it exactly to
2063 * spec would complicate the code, we just do it here
2064 */
2065
2066 cmdsts &= ~CMDSTS_OWN;
2067 cmdsts |= CMDSTS_OK;
2068
2069 DPRINTF(EthernetDesc,
2070 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2071 cmdsts, extsts);
2072
2073 txDmaFree = dmaDescFree;
2074 txDmaAddr = regs.txdp & 0x3fffffff;
2075 txDmaData = &cmdsts;
2076 if (is64bit) {
2077 txDmaAddr += offsetof(ns_desc64, cmdsts);
2078 txDmaLen =
2079 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2080 } else {
2081 txDmaAddr += offsetof(ns_desc32, cmdsts);
2082 txDmaLen =
2083 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2084 }
2085
2086 descDmaWrites++;
2087 descDmaWrBytes += txDmaLen;
2088
2089 transmit();
2090 txPacket = 0;
2091
2092 if (!txEnable) {
2093 DPRINTF(EthernetSM, "halting TX state machine\n");
2094 txState = txIdle;
2095 goto exit;
2096 } else
2097 txState = txAdvance;
2098
2099 if (doTxDmaWrite())
2100 goto exit;
2101 }
2102 } else {
2103 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2104 if (!txFifo.full()) {
2105 txState = txFragRead;
2106
2107 /*
2108 * The number of bytes transferred is either whatever
2109 * is left in the descriptor (txDescCnt), or if there
2110 * is not enough room in the fifo, just whatever room
2111 * is left in the fifo
2112 */
2113 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2114
2115 txDmaAddr = txFragPtr & 0x3fffffff;
2116 txDmaData = txPacketBufPtr;
2117 txDmaLen = txXferLen;
2118 txDmaFree = dmaDataFree;
2119
2120 if (doTxDmaRead())
2121 goto exit;
2122 } else {
2123 txState = txFifoBlock;
2124 transmit();
2125
2126 goto exit;
2127 }
2128
2129 }
2130 break;
2131
2132 case txFragRead:
2133 if (txDmaState != dmaIdle)
2134 goto exit;
2135
2136 txPacketBufPtr += txXferLen;
2137 txFragPtr += txXferLen;
2138 txDescCnt -= txXferLen;
2139 txFifo.reserve(txXferLen);
2140
2141 txState = txFifoBlock;
2142 break;
2143
2144 case txDescWrite:
2145 if (txDmaState != dmaIdle)
2146 goto exit;
2147
2148 if (cmdsts & CMDSTS_INTR)
2149 devIntrPost(ISR_TXDESC);
2150
2151 if (!txEnable) {
2152 DPRINTF(EthernetSM, "halting TX state machine\n");
2153 txState = txIdle;
2154 goto exit;
2155 } else
2156 txState = txAdvance;
2157 break;
2158
2159 case txAdvance:
2160 if (link == 0) {
2161 devIntrPost(ISR_TXIDLE);
2162 txState = txIdle;
2163 goto exit;
2164 } else {
2165 if (txDmaState != dmaIdle)
2166 goto exit;
2167 txState = txDescRead;
2168 regs.txdp = link;
2169 CTDD = false;
2170
2171 txDmaAddr = link & 0x3fffffff;
2172 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2173 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2174 txDmaFree = dmaDescFree;
2175
2176 if (doTxDmaRead())
2177 goto exit;
2178 }
2179 break;
2180
2181 default:
2182 panic("invalid state");
2183 }
2184
2185 DPRINTF(EthernetSM, "entering next txState=%s\n",
2186 NsTxStateStrings[txState]);
2187 goto next;
2188
2189 exit:
2190 /**
2191 * @todo do we want to schedule a future kick?
2192 */
2193 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2194 NsTxStateStrings[txState]);
2195
2196 if (clock && !txKickEvent.scheduled())
2197 txKickEvent.schedule(txKickTick);
2198 }
2199
2200 /**
2201 * Advance the EEPROM state machine
2202 * Called on rising edge of EEPROM clock bit in MEAR
2203 */
/**
 * Advance the bit-serial EEPROM state machine by one clock edge:
 * collect the start bit, then a 2-bit opcode, then a 6-bit address,
 * then shift out 16 bits of data through MEAR_EEDO.
 * Only the READ opcode and the perfect-match addresses used by the
 * FreeBSD driver are implemented.
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift the next opcode bit in from MEAR_EEDI (MSB first).
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift the next address bit in from MEAR_EEDI (MSB first).
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Each EEPROM word holds two bytes of the station
            // (perfect-match) MAC address.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        // Shift out MSB first.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2300
2301 void
2302 NSGigE::transferDone()
2303 {
2304 if (txFifo.empty()) {
2305 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2306 return;
2307 }
2308
2309 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2310
2311 if (txEvent.scheduled())
2312 txEvent.reschedule(curTick + cycles(1));
2313 else
2314 txEvent.schedule(curTick + cycles(1));
2315 }
2316
2317 bool
2318 NSGigE::rxFilter(const EthPacketPtr &packet)
2319 {
2320 EthPtr eth = packet;
2321 bool drop = true;
2322 string type;
2323
2324 const EthAddr &dst = eth->dst();
2325 if (dst.unicast()) {
2326 // If we're accepting all unicast addresses
2327 if (acceptUnicast)
2328 drop = false;
2329
2330 // If we make a perfect match
2331 if (acceptPerfect && dst == rom.perfectMatch)
2332 drop = false;
2333
2334 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2335 drop = false;
2336
2337 } else if (dst.broadcast()) {
2338 // if we're accepting broadcasts
2339 if (acceptBroadcast)
2340 drop = false;
2341
2342 } else if (dst.multicast()) {
2343 // if we're accepting all multicasts
2344 if (acceptMulticast)
2345 drop = false;
2346
2347 // Multicast hashing faked - all packets accepted
2348 if (multicastHashEnable)
2349 drop = false;
2350 }
2351
2352 if (drop) {
2353 DPRINTF(Ethernet, "rxFilter drop\n");
2354 DDUMP(EthernetData, packet->data, packet->length);
2355 }
2356
2357 return drop;
2358 }
2359
/**
 * Accept a packet arriving from the wire.
 *
 * Returns true if the packet was consumed (including the cases where
 * it is silently dropped), false only when the rx FIFO is full and
 * the sender should retry. Note that rxBytes/rxPackets are counted
 * before any filtering or enable checks.
 */
bool
NSGigE::recvPacket(EthPacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    // NOTE(review): packets are dropped when filtering is DISABLED;
    // this mirrors how the device is modeled here — confirm against
    // the driver's use of RFCR before changing.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        // false tells the caller the packet was NOT accepted.
        return false;
    }

    rxFifo.push(packet);

    // Wake the receive state machine to start processing.
    rxKick();
    return true;
}
2408
2409
/**
 * Resume the device after a drain.
 */
void
NSGigE::resume()
{
    SimObject::resume();

    // During drain we could have left the state machines in a waiting
    // state and they wouldn't get out until some other event occurred
    // to kick them. This way they'll get out immediately.
    txKick();
    rxKick();
}
2421
2422
2423 //=====================================================================
2424 //
2425 //
/**
 * Serialize the full device state to a checkpoint.
 *
 * NOTE: the order and names of the fields written here must match
 * NSGigE::unserialize() exactly.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // @todo will mem system save pending dma?

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are optional; a boolean flag records whether
    // the packet (and its buffer offset) follows in the checkpoint.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum states are stored as plain ints for checkpoint stability.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored as a delta relative to curTick; 0 means "not scheduled".
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2597
/**
 * Restore the device's state from a checkpoint.
 *
 * The field names and ordering here must mirror NSGigE::serialize()
 * exactly: the UNSERIALIZE_* macros look values up by the stringized
 * variable name, so renaming any local silently breaks checkpoint
 * compatibility.
 *
 * @param cp      Checkpoint to read state from.
 * @param section Section name within the checkpoint for this device.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class (PCI config space, BARs, etc.)
    PciDev::unserialize(cp, section);

    // Device register file
    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    // Receive-filter ROM: perfect-match MAC address and multicast hash table
    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // In-flight tx packet: reconstruct the packet buffer and restore the
    // write pointer as an offset from the start of the packet data.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // 16384 appears to be the device's fixed max packet-buffer size —
        // TODO confirm against the allocation sites elsewhere in this file
        txPacket = new EthPacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    // In-flight rx packet, same scheme as tx above.
    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    // NOTE(review): this assignment is redundant — both branches below
    // set rxPacket; harmless, but could be dropped.
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new EthPacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    // Both the 64-bit and 32-bit descriptor caches are restored; which one
    // is active depends on the device's addressing mode.
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // Enums are checkpointed as plain ints; cast back to the enum type.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A nonzero kick tick means the tx kernel was scheduled at checkpoint
    // time; re-arm the event so it resumes.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was stored relative to curTick at serialize time, so
    // re-anchor it to the current tick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    // A nonzero intrEventTick means an interrupt-posting event was pending
    // at checkpoint time; recreate and reschedule it (absolute tick —
    // serialize() stored intrEvent->when() unadjusted).
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }
}
2778
// Configuration parameters for the NSGigEInt ethernet interface object.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // other end of the ethernet link (may be NULL)
    SimObjectParam<NSGigE *> device;    // NIC this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

// Parameter descriptions and defaults; order must match the declarations above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2792
2793 CREATE_SIM_OBJECT(NSGigEInt)
2794 {
2795 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2796
2797 EtherInt *p = (EtherInt *)peer;
2798 if (p) {
2799 dev_int->setPeer(p);
2800 p->setPeer(dev_int);
2801 }
2802
2803 return dev_int;
2804 }
2805
2806 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2807
2808
// Configuration parameters for the NSGigE (DP83820) device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // PCI device plumbing
    SimObjectParam<System *> system;
    SimObjectParam<Platform *> platform;
    SimObjectParam<PciConfigData *> configdata;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    Param<Tick> pio_latency;
    Param<Tick> config_latency;

    // Timing model: state-machine clock, DMA delays/factors, interrupt delay
    Param<Tick> clock;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> intr_delay;

    // Link-side timing and fifo sizing
    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> tx_fifo_size;

    // Receive filtering and addressing
    Param<bool> rx_filter;
    Param<string> hardware_address;
    Param<bool> rx_thread;
    Param<bool> tx_thread;
    Param<bool> rss;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

// Parameter descriptions and defaults; order must match the declarations above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(system, "System pointer"),
    INIT_PARAM(platform, "Platform pointer"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(pci_bus, "PCI bus ID"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM(config_latency, "Number of cycles for a config read or write"),
    INIT_PARAM(clock, "State machine cycle time"),

    INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
    INIT_PARAM(dma_data_free, "DMA of Data is free"),
    INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
    INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
    INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
    INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
    INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),

    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
    INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),

    INIT_PARAM(rx_filter, "Enable Receive Filter"),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM(rx_thread, ""),
    INIT_PARAM(tx_thread, ""),
    INIT_PARAM(rss, "")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2876
2877
2878 CREATE_SIM_OBJECT(NSGigE)
2879 {
2880 NSGigE::Params *params = new NSGigE::Params;
2881
2882 params->name = getInstanceName();
2883 params->platform = platform;
2884 params->system = system;
2885 params->configData = configdata;
2886 params->busNum = pci_bus;
2887 params->deviceNum = pci_dev;
2888 params->functionNum = pci_func;
2889 params->pio_delay = pio_latency;
2890 params->config_delay = config_latency;
2891
2892 params->clock = clock;
2893 params->dma_desc_free = dma_desc_free;
2894 params->dma_data_free = dma_data_free;
2895 params->dma_read_delay = dma_read_delay;
2896 params->dma_write_delay = dma_write_delay;
2897 params->dma_read_factor = dma_read_factor;
2898 params->dma_write_factor = dma_write_factor;
2899 params->dma_no_allocate = dma_no_allocate;
2900 params->pio_delay = pio_latency;
2901 params->intr_delay = intr_delay;
2902
2903 params->rx_delay = rx_delay;
2904 params->tx_delay = tx_delay;
2905 params->rx_fifo_size = rx_fifo_size;
2906 params->tx_fifo_size = tx_fifo_size;
2907
2908 params->rx_filter = rx_filter;
2909 params->eaddr = hardware_address;
2910 params->rx_thread = rx_thread;
2911 params->tx_thread = tx_thread;
2912 params->rss = rss;
2913
2914 return new NSGigE(params);
2915 }
2916
2917 REGISTER_SIM_OBJECT("NSGigE", NSGigE)