// gem5 source file: dev/ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "arch/vtophys.hh"
53
// Printable names for the receive-side state machine; used by debug
// tracing.  Order must stay in sync with the corresponding rx state
// enum (rxIdle ... rxAdvance) -- presumably declared in ns_gige.hh.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
64
// Printable names for the transmit-side state machine; used by debug
// tracing.  Order must stay in sync with the corresponding tx state
// enum (txIdle ... txAdvance) -- presumably declared in ns_gige.hh.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
75
// Printable names for the DMA engine states (shared by the rx and tx
// DMA machines); order must match the dmaIdle...dmaWriteWaiting enum.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
84
85 using namespace std;
86 using namespace Net;
87 using namespace TheISA;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
/**
 * Construct the NS DP83820 device model.  All simulation state is
 * initialized from the Params object: FIFO sizes, DMA/PIO bus
 * attachments, interrupt and DMA delay factors, and the station
 * (MAC) address loaded into the perfect-match ROM.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Hook programmed I/O accesses up to cacheAccess() if a PIO bus
    // was configured.
    if (p->pio_bus) {
        pioInterface = newPioInterface(name() + ".pio", p->hier,
                                       p->pio_bus, this,
                                       &NSGigE::cacheAccess);
        pioLatency = p->pio_latency * p->pio_bus->clockRate;
    }

    // DMA may use separate buses for descriptor (header) and packet
    // (payload) traffic; with only a header bus, both go over it.
    if (p->header_bus) {
        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus)
        panic("Must define a header bus if defining a payload bus");

    // Timing knobs for interrupt posting and DMA latency modeling.
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // Put the device registers in their power-on state and seed the
    // perfect-match filter ROM with the configured MAC address.
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);

    // Start with clean descriptor caches (both 32- and 64-bit layouts).
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
148
// Destructor is intentionally empty: no members visible here require
// explicit teardown in this revision (heap objects such as the
// DMA/PIO interfaces are leaked at simulator shutdown).
NSGigE::~NSGigE()
{}
151
152 void
153 NSGigE::regStats()
154 {
155 txBytes
156 .name(name() + ".txBytes")
157 .desc("Bytes Transmitted")
158 .prereq(txBytes)
159 ;
160
161 rxBytes
162 .name(name() + ".rxBytes")
163 .desc("Bytes Received")
164 .prereq(rxBytes)
165 ;
166
167 txPackets
168 .name(name() + ".txPackets")
169 .desc("Number of Packets Transmitted")
170 .prereq(txBytes)
171 ;
172
173 rxPackets
174 .name(name() + ".rxPackets")
175 .desc("Number of Packets Received")
176 .prereq(rxBytes)
177 ;
178
179 txIpChecksums
180 .name(name() + ".txIpChecksums")
181 .desc("Number of tx IP Checksums done by device")
182 .precision(0)
183 .prereq(txBytes)
184 ;
185
186 rxIpChecksums
187 .name(name() + ".rxIpChecksums")
188 .desc("Number of rx IP Checksums done by device")
189 .precision(0)
190 .prereq(rxBytes)
191 ;
192
193 txTcpChecksums
194 .name(name() + ".txTcpChecksums")
195 .desc("Number of tx TCP Checksums done by device")
196 .precision(0)
197 .prereq(txBytes)
198 ;
199
200 rxTcpChecksums
201 .name(name() + ".rxTcpChecksums")
202 .desc("Number of rx TCP Checksums done by device")
203 .precision(0)
204 .prereq(rxBytes)
205 ;
206
207 txUdpChecksums
208 .name(name() + ".txUdpChecksums")
209 .desc("Number of tx UDP Checksums done by device")
210 .precision(0)
211 .prereq(txBytes)
212 ;
213
214 rxUdpChecksums
215 .name(name() + ".rxUdpChecksums")
216 .desc("Number of rx UDP Checksums done by device")
217 .precision(0)
218 .prereq(rxBytes)
219 ;
220
221 descDmaReads
222 .name(name() + ".descDMAReads")
223 .desc("Number of descriptors the device read w/ DMA")
224 .precision(0)
225 ;
226
227 descDmaWrites
228 .name(name() + ".descDMAWrites")
229 .desc("Number of descriptors the device wrote w/ DMA")
230 .precision(0)
231 ;
232
233 descDmaRdBytes
234 .name(name() + ".descDmaReadBytes")
235 .desc("number of descriptor bytes read w/ DMA")
236 .precision(0)
237 ;
238
239 descDmaWrBytes
240 .name(name() + ".descDmaWriteBytes")
241 .desc("number of descriptor bytes write w/ DMA")
242 .precision(0)
243 ;
244
245 txBandwidth
246 .name(name() + ".txBandwidth")
247 .desc("Transmit Bandwidth (bits/s)")
248 .precision(0)
249 .prereq(txBytes)
250 ;
251
252 rxBandwidth
253 .name(name() + ".rxBandwidth")
254 .desc("Receive Bandwidth (bits/s)")
255 .precision(0)
256 .prereq(rxBytes)
257 ;
258
259 totBandwidth
260 .name(name() + ".totBandwidth")
261 .desc("Total Bandwidth (bits/s)")
262 .precision(0)
263 .prereq(totBytes)
264 ;
265
266 totPackets
267 .name(name() + ".totPackets")
268 .desc("Total Packets")
269 .precision(0)
270 .prereq(totBytes)
271 ;
272
273 totBytes
274 .name(name() + ".totBytes")
275 .desc("Total Bytes")
276 .precision(0)
277 .prereq(totBytes)
278 ;
279
280 totPacketRate
281 .name(name() + ".totPPS")
282 .desc("Total Tranmission Rate (packets/s)")
283 .precision(0)
284 .prereq(totBytes)
285 ;
286
287 txPacketRate
288 .name(name() + ".txPPS")
289 .desc("Packet Tranmission Rate (packets/s)")
290 .precision(0)
291 .prereq(txBytes)
292 ;
293
294 rxPacketRate
295 .name(name() + ".rxPPS")
296 .desc("Packet Reception Rate (packets/s)")
297 .precision(0)
298 .prereq(rxBytes)
299 ;
300
301 postedSwi
302 .name(name() + ".postedSwi")
303 .desc("number of software interrupts posted to CPU")
304 .precision(0)
305 ;
306
307 totalSwi
308 .name(name() + ".totalSwi")
309 .desc("total number of Swi written to ISR")
310 .precision(0)
311 ;
312
313 coalescedSwi
314 .name(name() + ".coalescedSwi")
315 .desc("average number of Swi's coalesced into each post")
316 .precision(0)
317 ;
318
319 postedRxIdle
320 .name(name() + ".postedRxIdle")
321 .desc("number of rxIdle interrupts posted to CPU")
322 .precision(0)
323 ;
324
325 totalRxIdle
326 .name(name() + ".totalRxIdle")
327 .desc("total number of RxIdle written to ISR")
328 .precision(0)
329 ;
330
331 coalescedRxIdle
332 .name(name() + ".coalescedRxIdle")
333 .desc("average number of RxIdle's coalesced into each post")
334 .precision(0)
335 ;
336
337 postedRxOk
338 .name(name() + ".postedRxOk")
339 .desc("number of RxOk interrupts posted to CPU")
340 .precision(0)
341 ;
342
343 totalRxOk
344 .name(name() + ".totalRxOk")
345 .desc("total number of RxOk written to ISR")
346 .precision(0)
347 ;
348
349 coalescedRxOk
350 .name(name() + ".coalescedRxOk")
351 .desc("average number of RxOk's coalesced into each post")
352 .precision(0)
353 ;
354
355 postedRxDesc
356 .name(name() + ".postedRxDesc")
357 .desc("number of RxDesc interrupts posted to CPU")
358 .precision(0)
359 ;
360
361 totalRxDesc
362 .name(name() + ".totalRxDesc")
363 .desc("total number of RxDesc written to ISR")
364 .precision(0)
365 ;
366
367 coalescedRxDesc
368 .name(name() + ".coalescedRxDesc")
369 .desc("average number of RxDesc's coalesced into each post")
370 .precision(0)
371 ;
372
373 postedTxOk
374 .name(name() + ".postedTxOk")
375 .desc("number of TxOk interrupts posted to CPU")
376 .precision(0)
377 ;
378
379 totalTxOk
380 .name(name() + ".totalTxOk")
381 .desc("total number of TxOk written to ISR")
382 .precision(0)
383 ;
384
385 coalescedTxOk
386 .name(name() + ".coalescedTxOk")
387 .desc("average number of TxOk's coalesced into each post")
388 .precision(0)
389 ;
390
391 postedTxIdle
392 .name(name() + ".postedTxIdle")
393 .desc("number of TxIdle interrupts posted to CPU")
394 .precision(0)
395 ;
396
397 totalTxIdle
398 .name(name() + ".totalTxIdle")
399 .desc("total number of TxIdle written to ISR")
400 .precision(0)
401 ;
402
403 coalescedTxIdle
404 .name(name() + ".coalescedTxIdle")
405 .desc("average number of TxIdle's coalesced into each post")
406 .precision(0)
407 ;
408
409 postedTxDesc
410 .name(name() + ".postedTxDesc")
411 .desc("number of TxDesc interrupts posted to CPU")
412 .precision(0)
413 ;
414
415 totalTxDesc
416 .name(name() + ".totalTxDesc")
417 .desc("total number of TxDesc written to ISR")
418 .precision(0)
419 ;
420
421 coalescedTxDesc
422 .name(name() + ".coalescedTxDesc")
423 .desc("average number of TxDesc's coalesced into each post")
424 .precision(0)
425 ;
426
427 postedRxOrn
428 .name(name() + ".postedRxOrn")
429 .desc("number of RxOrn posted to CPU")
430 .precision(0)
431 ;
432
433 totalRxOrn
434 .name(name() + ".totalRxOrn")
435 .desc("total number of RxOrn written to ISR")
436 .precision(0)
437 ;
438
439 coalescedRxOrn
440 .name(name() + ".coalescedRxOrn")
441 .desc("average number of RxOrn's coalesced into each post")
442 .precision(0)
443 ;
444
445 coalescedTotal
446 .name(name() + ".coalescedTotal")
447 .desc("average number of interrupts coalesced into each post")
448 .precision(0)
449 ;
450
451 postedInterrupts
452 .name(name() + ".postedInterrupts")
453 .desc("number of posts to CPU")
454 .precision(0)
455 ;
456
457 droppedPackets
458 .name(name() + ".droppedPackets")
459 .desc("number of packets dropped")
460 .precision(0)
461 ;
462
463 coalescedSwi = totalSwi / postedInterrupts;
464 coalescedRxIdle = totalRxIdle / postedInterrupts;
465 coalescedRxOk = totalRxOk / postedInterrupts;
466 coalescedRxDesc = totalRxDesc / postedInterrupts;
467 coalescedTxOk = totalTxOk / postedInterrupts;
468 coalescedTxIdle = totalTxIdle / postedInterrupts;
469 coalescedTxDesc = totalTxDesc / postedInterrupts;
470 coalescedRxOrn = totalRxOrn / postedInterrupts;
471
472 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
473 totalTxOk + totalTxIdle + totalTxDesc +
474 totalRxOrn) / postedInterrupts;
475
476 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
477 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
478 totBandwidth = txBandwidth + rxBandwidth;
479 totBytes = txBytes + rxBytes;
480 totPackets = txPackets + rxPackets;
481
482 txPacketRate = txPackets / simSeconds;
483 rxPacketRate = rxPackets / simSeconds;
484 }
485
486 /**
487 * This is to read the PCI general configuration registers
488 */
489 void
490 NSGigE::readConfig(int offset, int size, uint8_t *data)
491 {
492 if (offset < PCI_DEVICE_SPECIFIC)
493 PciDev::readConfig(offset, size, data);
494 else
495 panic("Device specific PCI config space not implemented!\n");
496 }
497
/**
 * This is to write to the PCI general configuration registers.
 *
 * Delegates the actual register update to PciDev::writeConfig (or
 * panics for the unimplemented device-specific region), then reacts
 * to side effects of specific offsets: PCI_COMMAND toggles ioEnable,
 * and BAR0/BAR1 writes register the new PIO address range.
 */
void
NSGigE::writeConfig(int offset, int size, const uint8_t* data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
    // seems to work fine without all these PCI settings, but i
    // put in the IO to double check, an assertion will fail if we
    // need to properly implement it
    case PCI_COMMAND:
        // Track the I/O Space Enable bit; read()/write() assert on it.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

    case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // NOTE(review): masking with EV5::PAddrUncachedMask is
            // Alpha-specific; confirm if other ISAs are targeted.
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
    case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
555
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * Only 32-bit accesses are supported for the register file proper;
 * accesses in the PCI-config alias region are forwarded to
 * readConfig(), and MIB reads always return 0.  Some reads have side
 * effects mandated by the spec (CR clears its one-shot bits, ISR is
 * cleared on read, MIBC clears MIBS/ACLR).
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        readConfig(daddr & 0xff, req->size, data);
        return NoFault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return NoFault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;
            uint16_t rfaddr;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // Reading ISR clears all (implemented) interrupts.
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              // see the spec sheet for how RFCR and RFDR work
              // basically, you write to RFCR to tell the machine
              // what you want to do next, then you act upon RFDR,
              // and the device will be prepared b/c of what you
              // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which filter word RFDR exposes:
                // 0x000/0x002/0x004 are the perfect-match MAC octets,
                // FHASH_ADDR.. is the multicast hash table.
                rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
                switch (rfaddr) {
                  // Read from perfect match ROM octets
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    // Read filter hash table
                    if (rfaddr >= FHASH_ADDR &&
                        rfaddr < FHASH_ADDR + FHASH_SIZE) {

                        // Only word-aligned reads supported
                        if (rfaddr % 2)
                            panic("unaligned read from filter hash table!");

                        reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                        reg += rom.filterHash[rfaddr - FHASH_ADDR];
                        break;
                    }

                    panic("reading RFDR for something other than pattern"
                          " matching or hashing! %#x\n", rfaddr);
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // these status/clear bits read back as zero
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // M5-specific pseudo-register: advertises which
                // simulator-side helper threads are configured.
                reg = 0;
                if (params()->rx_thread)
                    reg |= M5REG_RX_THREAD;
                if (params()->tx_thread)
                    reg |= M5REG_TX_THREAD;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return NoFault;
}
788
789 Fault
790 NSGigE::write(MemReqPtr &req, const uint8_t *data)
791 {
792 assert(ioEnable);
793
794 Addr daddr = req->paddr & 0xfff;
795 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
796 daddr, req->paddr, req->vaddr, req->size);
797
798 if (daddr > LAST && daddr <= RESERVED) {
799 panic("Accessing reserved register");
800 } else if (daddr > RESERVED && daddr <= 0x3FC) {
801 writeConfig(daddr & 0xff, req->size, data);
802 return NoFault;
803 } else if (daddr > 0x3FC)
804 panic("Something is messed up!\n");
805
806 if (req->size == sizeof(uint32_t)) {
807 uint32_t reg = *(uint32_t *)data;
808 uint16_t rfaddr;
809
810 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
811
812 switch (daddr) {
813 case CR:
814 regs.command = reg;
815 if (reg & CR_TXD) {
816 txEnable = false;
817 } else if (reg & CR_TXE) {
818 txEnable = true;
819
820 // the kernel is enabling the transmit machine
821 if (txState == txIdle)
822 txKick();
823 }
824
825 if (reg & CR_RXD) {
826 rxEnable = false;
827 } else if (reg & CR_RXE) {
828 rxEnable = true;
829
830 if (rxState == rxIdle)
831 rxKick();
832 }
833
834 if (reg & CR_TXR)
835 txReset();
836
837 if (reg & CR_RXR)
838 rxReset();
839
840 if (reg & CR_SWI)
841 devIntrPost(ISR_SWI);
842
843 if (reg & CR_RST) {
844 txReset();
845 rxReset();
846
847 regsReset();
848 }
849 break;
850
851 case CFGR:
852 if (reg & CFGR_LNKSTS ||
853 reg & CFGR_SPDSTS ||
854 reg & CFGR_DUPSTS ||
855 reg & CFGR_RESERVED ||
856 reg & CFGR_T64ADDR ||
857 reg & CFGR_PCI64_DET)
858
859 // First clear all writable bits
860 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
861 CFGR_RESERVED | CFGR_T64ADDR |
862 CFGR_PCI64_DET;
863 // Now set the appropriate writable bits
864 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
865 CFGR_RESERVED | CFGR_T64ADDR |
866 CFGR_PCI64_DET);
867
868 // all these #if 0's are because i don't THINK the kernel needs to
869 // have these implemented. if there is a problem relating to one of
870 // these, you may need to add functionality in.
871 if (reg & CFGR_TBI_EN) ;
872 if (reg & CFGR_MODE_1000) ;
873
874 if (reg & CFGR_AUTO_1000)
875 panic("CFGR_AUTO_1000 not implemented!\n");
876
877 if (reg & CFGR_PINT_DUPSTS ||
878 reg & CFGR_PINT_LNKSTS ||
879 reg & CFGR_PINT_SPDSTS)
880 ;
881
882 if (reg & CFGR_TMRTEST) ;
883 if (reg & CFGR_MRM_DIS) ;
884 if (reg & CFGR_MWI_DIS) ;
885
886 if (reg & CFGR_T64ADDR) ;
887 // panic("CFGR_T64ADDR is read only register!\n");
888
889 if (reg & CFGR_PCI64_DET)
890 panic("CFGR_PCI64_DET is read only register!\n");
891
892 if (reg & CFGR_DATA64_EN) ;
893 if (reg & CFGR_M64ADDR) ;
894 if (reg & CFGR_PHY_RST) ;
895 if (reg & CFGR_PHY_DIS) ;
896
897 if (reg & CFGR_EXTSTS_EN)
898 extstsEnable = true;
899 else
900 extstsEnable = false;
901
902 if (reg & CFGR_REQALG) ;
903 if (reg & CFGR_SB) ;
904 if (reg & CFGR_POW) ;
905 if (reg & CFGR_EXD) ;
906 if (reg & CFGR_PESEL) ;
907 if (reg & CFGR_BROM_DIS) ;
908 if (reg & CFGR_EXT_125) ;
909 if (reg & CFGR_BEM) ;
910 break;
911
912 case MEAR:
913 // Clear writable bits
914 regs.mear &= MEAR_EEDO;
915 // Set appropriate writable bits
916 regs.mear |= reg & ~MEAR_EEDO;
917
918 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
919 // even though it could get it through RFDR
920 if (reg & MEAR_EESEL) {
921 // Rising edge of clock
922 if (reg & MEAR_EECLK && !eepromClk)
923 eepromKick();
924 }
925 else {
926 eepromState = eepromStart;
927 regs.mear &= ~MEAR_EEDI;
928 }
929
930 eepromClk = reg & MEAR_EECLK;
931
932 // since phy is completely faked, MEAR_MD* don't matter
933 if (reg & MEAR_MDIO) ;
934 if (reg & MEAR_MDDIR) ;
935 if (reg & MEAR_MDC) ;
936 break;
937
938 case PTSCR:
939 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
940 // these control BISTs for various parts of chip - we
941 // don't care or do just fake that the BIST is done
942 if (reg & PTSCR_RBIST_EN)
943 regs.ptscr |= PTSCR_RBIST_DONE;
944 if (reg & PTSCR_EEBIST_EN)
945 regs.ptscr &= ~PTSCR_EEBIST_EN;
946 if (reg & PTSCR_EELOAD_EN)
947 regs.ptscr &= ~PTSCR_EELOAD_EN;
948 break;
949
950 case ISR: /* writing to the ISR has no effect */
951 panic("ISR is a read only register!\n");
952
953 case IMR:
954 regs.imr = reg;
955 devIntrChangeMask();
956 break;
957
958 case IER:
959 regs.ier = reg;
960 break;
961
962 case IHR:
963 regs.ihr = reg;
964 /* not going to implement real interrupt holdoff */
965 break;
966
967 case TXDP:
968 regs.txdp = (reg & 0xFFFFFFFC);
969 assert(txState == txIdle);
970 CTDD = false;
971 break;
972
973 case TXDP_HI:
974 regs.txdp_hi = reg;
975 break;
976
977 case TX_CFG:
978 regs.txcfg = reg;
979 #if 0
980 if (reg & TX_CFG_CSI) ;
981 if (reg & TX_CFG_HBI) ;
982 if (reg & TX_CFG_MLB) ;
983 if (reg & TX_CFG_ATP) ;
984 if (reg & TX_CFG_ECRETRY) {
985 /*
986 * this could easily be implemented, but considering
987 * the network is just a fake pipe, wouldn't make
988 * sense to do this
989 */
990 }
991
992 if (reg & TX_CFG_BRST_DIS) ;
993 #endif
994
995 #if 0
996 /* we handle our own DMA, ignore the kernel's exhortations */
997 if (reg & TX_CFG_MXDMA) ;
998 #endif
999
1000 // also, we currently don't care about fill/drain
1001 // thresholds though this may change in the future with
1002 // more realistic networks or a driver which changes it
1003 // according to feedback
1004
1005 break;
1006
1007 case GPIOR:
1008 // Only write writable bits
1009 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1010 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
1011 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1012 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
1013 /* these just control general purpose i/o pins, don't matter */
1014 break;
1015
1016 case RXDP:
1017 regs.rxdp = reg;
1018 CRDD = false;
1019 break;
1020
1021 case RXDP_HI:
1022 regs.rxdp_hi = reg;
1023 break;
1024
1025 case RX_CFG:
1026 regs.rxcfg = reg;
1027 #if 0
1028 if (reg & RX_CFG_AEP) ;
1029 if (reg & RX_CFG_ARP) ;
1030 if (reg & RX_CFG_STRIPCRC) ;
1031 if (reg & RX_CFG_RX_RD) ;
1032 if (reg & RX_CFG_ALP) ;
1033 if (reg & RX_CFG_AIRL) ;
1034
1035 /* we handle our own DMA, ignore what kernel says about it */
1036 if (reg & RX_CFG_MXDMA) ;
1037
1038 //also, we currently don't care about fill/drain thresholds
1039 //though this may change in the future with more realistic
1040 //networks or a driver which changes it according to feedback
1041 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1042 #endif
1043 break;
1044
1045 case PQCR:
1046 /* there is no priority queueing used in the linux 2.6 driver */
1047 regs.pqcr = reg;
1048 break;
1049
1050 case WCSR:
1051 /* not going to implement wake on LAN */
1052 regs.wcsr = reg;
1053 break;
1054
1055 case PCR:
1056 /* not going to implement pause control */
1057 regs.pcr = reg;
1058 break;
1059
1060 case RFCR:
1061 regs.rfcr = reg;
1062
1063 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1064 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1065 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1066 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1067 acceptPerfect = (reg & RFCR_APM) ? true : false;
1068 acceptArp = (reg & RFCR_AARP) ? true : false;
1069 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1070
1071 #if 0
1072 if (reg & RFCR_APAT)
1073 panic("RFCR_APAT not implemented!\n");
1074 #endif
1075 if (reg & RFCR_UHEN)
1076 panic("Unicast hash filtering not used by drivers!\n");
1077
1078 if (reg & RFCR_ULM)
1079 panic("RFCR_ULM not implemented!\n");
1080
1081 break;
1082
1083 case RFDR:
1084 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1085 switch (rfaddr) {
1086 case 0x000:
1087 rom.perfectMatch[0] = (uint8_t)reg;
1088 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1089 break;
1090 case 0x002:
1091 rom.perfectMatch[2] = (uint8_t)reg;
1092 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1093 break;
1094 case 0x004:
1095 rom.perfectMatch[4] = (uint8_t)reg;
1096 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1097 break;
1098 default:
1099
1100 if (rfaddr >= FHASH_ADDR &&
1101 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1102
1103 // Only word-aligned writes supported
1104 if (rfaddr % 2)
1105 panic("unaligned write to filter hash table!");
1106
1107 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1108 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1109 = (uint8_t)(reg >> 8);
1110 break;
1111 }
1112 panic("writing RFDR for something other than pattern matching\
1113 or hashing! %#x\n", rfaddr);
1114 }
1115
1116 case BRAR:
1117 regs.brar = reg;
1118 break;
1119
1120 case BRDR:
1121 panic("the driver never uses BRDR, something is wrong!\n");
1122
1123 case SRR:
1124 panic("SRR is read only register!\n");
1125
1126 case MIBC:
1127 panic("the driver never uses MIBC, something is wrong!\n");
1128
1129 case VRCR:
1130 regs.vrcr = reg;
1131 break;
1132
1133 case VTCR:
1134 regs.vtcr = reg;
1135 break;
1136
1137 case VDR:
1138 panic("the driver never uses VDR, something is wrong!\n");
1139
1140 case CCSR:
1141 /* not going to implement clockrun stuff */
1142 regs.ccsr = reg;
1143 break;
1144
1145 case TBICR:
1146 regs.tbicr = reg;
1147 if (reg & TBICR_MR_LOOPBACK)
1148 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1149
1150 if (reg & TBICR_MR_AN_ENABLE) {
1151 regs.tanlpar = regs.tanar;
1152 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1153 }
1154
1155 #if 0
1156 if (reg & TBICR_MR_RESTART_AN) ;
1157 #endif
1158
1159 break;
1160
1161 case TBISR:
1162 panic("TBISR is read only register!\n");
1163
1164 case TANAR:
1165 // Only write the writable bits
1166 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1167 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1168
1169 // Pause capability unimplemented
1170 #if 0
1171 if (reg & TANAR_PS2) ;
1172 if (reg & TANAR_PS1) ;
1173 #endif
1174
1175 break;
1176
1177 case TANLPAR:
1178 panic("this should only be written to by the fake phy!\n");
1179
1180 case TANER:
1181 panic("TANER is read only register!\n");
1182
1183 case TESR:
1184 regs.tesr = reg;
1185 break;
1186
1187 default:
1188 panic("invalid register access daddr=%#x", daddr);
1189 }
1190 } else {
1191 panic("Invalid Request Size");
1192 }
1193
1194 return NoFault;
1195 }
1196
/**
 * Record new interrupt cause bits in the ISR and, if any of them are
 * unmasked, schedule a post to the CPU.
 *
 * The total* stats count every time a given unmasked cause is written
 * to the ISR; devIntrClear() counts the matching posted* stats so
 * coalescing can be measured.  Posting is delayed by intrDelay unless
 * one of the ISR_NODELAY causes is set.
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    // Only implemented causes are latched into the ISR.
    interrupts &= ISR_IMPL;
    regs.isr |= interrupts;

    // Bump per-cause counters only for unmasked causes.
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        // NODELAY causes fire immediately; everything else waits
        // intrDelay ticks.
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1247
/* Note: because the interrupt-counting stats are updated inside this
   function, it is limited to its current use -- clearing all
   interrupts when the kernel reads and services the ISR.  Keep that
   in mind before expanding its use.
*/
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Count each unmasked cause that was pending at service time;
    // together with the total* counters in devIntrPost() this yields
    // the coalescing averages.
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    if (regs.isr & regs.imr & ISR_IMPL)
        postedInterrupts++;

    // Unimplemented causes are never latched, so never cleared.
    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // Drop the CPU interrupt line if nothing unmasked remains pending.
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1297
1298 void
1299 NSGigE::devIntrChangeMask()
1300 {
1301 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1302 regs.isr, regs.imr, regs.isr & regs.imr);
1303
1304 if (regs.isr & regs.imr)
1305 cpuIntrPost(curTick);
1306 else
1307 cpuIntrClear();
1308 }
1309
/**
 * Schedule the CPU-facing interrupt event for tick @p when.
 *
 * If an interrupt is already scheduled earlier than @p when, the new
 * request is folded into the existing one and nothing is scheduled.
 * Any later-scheduled event is squashed and replaced.
 *
 * @param when absolute tick at which the interrupt should fire;
 *             must not be in the past.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    // intrTick == 0 means no interrupt event is currently outstanding.
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp: should be unreachable given the asserts above,
    // but break into the debugger if it ever happens (see @todo).
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any outstanding event with one at the (earlier) new tick.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1344
1345 void
1346 NSGigE::cpuInterrupt()
1347 {
1348 assert(intrTick == curTick);
1349
1350 // Whether or not there's a pending interrupt, we don't care about
1351 // it anymore
1352 intrEvent = 0;
1353 intrTick = 0;
1354
1355 // Don't send an interrupt if there's already one
1356 if (cpuPendingIntr) {
1357 DPRINTF(EthernetIntr,
1358 "would send an interrupt now, but there's already pending\n");
1359 } else {
1360 // Send interrupt
1361 cpuPendingIntr = true;
1362
1363 DPRINTF(EthernetIntr, "posting interrupt\n");
1364 intrPost();
1365 }
1366 }
1367
1368 void
1369 NSGigE::cpuIntrClear()
1370 {
1371 if (!cpuPendingIntr)
1372 return;
1373
1374 if (intrEvent) {
1375 intrEvent->squash();
1376 intrEvent = 0;
1377 }
1378
1379 intrTick = 0;
1380
1381 cpuPendingIntr = false;
1382
1383 DPRINTF(EthernetIntr, "clearing interrupt\n");
1384 intrClear();
1385 }
1386
1387 bool
1388 NSGigE::cpuIntrPending() const
1389 { return cpuPendingIntr; }
1390
1391 void
1392 NSGigE::txReset()
1393 {
1394
1395 DPRINTF(Ethernet, "transmit reset\n");
1396
1397 CTDD = false;
1398 txEnable = false;;
1399 txFragPtr = 0;
1400 assert(txDescCnt == 0);
1401 txFifo.clear();
1402 txState = txIdle;
1403 assert(txDmaState == dmaIdle);
1404 }
1405
1406 void
1407 NSGigE::rxReset()
1408 {
1409 DPRINTF(Ethernet, "receive reset\n");
1410
1411 CRDD = false;
1412 assert(rxPktBytes == 0);
1413 rxEnable = false;
1414 rxFragPtr = 0;
1415 assert(rxDescCnt == 0);
1416 assert(rxDmaState == dmaIdle);
1417 rxFifo.clear();
1418 rxState = rxIdle;
1419 }
1420
/**
 * Reset all device registers to their power-on defaults per the
 * DP83820 register map, and clear the receive-filter acceptance flags.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    // Non-zero power-on values:
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    // Start with the receive filter rejecting everything; the driver
    // enables acceptance modes through the RFCR later.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1443
1444 void
1445 NSGigE::rxDmaReadCopy()
1446 {
1447 assert(rxDmaState == dmaReading);
1448
1449 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1450 rxDmaState = dmaIdle;
1451
1452 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1453 rxDmaAddr, rxDmaLen);
1454 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1455 }
1456
1457 bool
1458 NSGigE::doRxDmaRead()
1459 {
1460 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1461 rxDmaState = dmaReading;
1462
1463 if (dmaInterface && !rxDmaFree) {
1464 if (dmaInterface->busy())
1465 rxDmaState = dmaReadWaiting;
1466 else
1467 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1468 &rxDmaReadEvent, true);
1469 return true;
1470 }
1471
1472 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1473 rxDmaReadCopy();
1474 return false;
1475 }
1476
1477 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1478 Tick start = curTick + dmaReadDelay + factor;
1479 rxDmaReadEvent.schedule(start);
1480 return true;
1481 }
1482
1483 void
1484 NSGigE::rxDmaReadDone()
1485 {
1486 assert(rxDmaState == dmaReading);
1487 rxDmaReadCopy();
1488
1489 // If the transmit state machine has a pending DMA, let it go first
1490 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1491 txKick();
1492
1493 rxKick();
1494 }
1495
1496 void
1497 NSGigE::rxDmaWriteCopy()
1498 {
1499 assert(rxDmaState == dmaWriting);
1500
1501 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1502 rxDmaState = dmaIdle;
1503
1504 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1505 rxDmaAddr, rxDmaLen);
1506 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1507 }
1508
1509 bool
1510 NSGigE::doRxDmaWrite()
1511 {
1512 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1513 rxDmaState = dmaWriting;
1514
1515 if (dmaInterface && !rxDmaFree) {
1516 if (dmaInterface->busy())
1517 rxDmaState = dmaWriteWaiting;
1518 else
1519 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1520 &rxDmaWriteEvent, true);
1521 return true;
1522 }
1523
1524 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1525 rxDmaWriteCopy();
1526 return false;
1527 }
1528
1529 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1530 Tick start = curTick + dmaWriteDelay + factor;
1531 rxDmaWriteEvent.schedule(start);
1532 return true;
1533 }
1534
1535 void
1536 NSGigE::rxDmaWriteDone()
1537 {
1538 assert(rxDmaState == dmaWriting);
1539 rxDmaWriteCopy();
1540
1541 // If the transmit state machine has a pending DMA, let it go first
1542 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1543 txKick();
1544
1545 rxKick();
1546 }
1547
/**
 * Receive-side state machine.
 *
 * Runs as a loop over rxState: a state that finishes its work and can
 * proceed jumps back to the "next" label; a state that must wait for a
 * descriptor/data DMA jumps to "exit", and the DMA completion event
 * (rxDmaReadDone/rxDmaWriteDone) re-enters this function.  When a
 * device clock is modelled, at most one state transition happens per
 * cycle (gated by rxKickTick).
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    Addr link, bufptr;
    // References into whichever descriptor image (32- or 64-bit) is live.
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was stalled on a busy DMA interface.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor already done: just refresh its link
            // field to find the next descriptor.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the full descriptor at RXDP.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        // OWN set means the descriptor still belongs to the driver
        // (nothing for the device to fill); go idle.
        if (cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Bytes remain: DMA the next fragment into the buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Packet fully copied: finalize cmdsts/extsts and write
            // the status back to the descriptor.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Verify checksums and record the results in EXTSTS so the
            // driver can skip its own verification.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write back cmdsts+extsts (adjacent in both desc layouts).
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment that just finished.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            // End of the descriptor ring: remember the current
            // descriptor is done (CRDD) and go idle.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1884
/**
 * Try to send the packet at the head of the transmit FIFO out on the
 * wire.  On success, update stats, pop the FIFO, and post TXOK; on
 * failure (link busy), reschedule another attempt after retryTime.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt.  but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // If more packets remain (or the send failed), try again later.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}
1935
1936 void
1937 NSGigE::txDmaReadCopy()
1938 {
1939 assert(txDmaState == dmaReading);
1940
1941 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1942 txDmaState = dmaIdle;
1943
1944 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1945 txDmaAddr, txDmaLen);
1946 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1947 }
1948
1949 bool
1950 NSGigE::doTxDmaRead()
1951 {
1952 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1953 txDmaState = dmaReading;
1954
1955 if (dmaInterface && !txDmaFree) {
1956 if (dmaInterface->busy())
1957 txDmaState = dmaReadWaiting;
1958 else
1959 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1960 &txDmaReadEvent, true);
1961 return true;
1962 }
1963
1964 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1965 txDmaReadCopy();
1966 return false;
1967 }
1968
1969 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1970 Tick start = curTick + dmaReadDelay + factor;
1971 txDmaReadEvent.schedule(start);
1972 return true;
1973 }
1974
1975 void
1976 NSGigE::txDmaReadDone()
1977 {
1978 assert(txDmaState == dmaReading);
1979 txDmaReadCopy();
1980
1981 // If the receive state machine has a pending DMA, let it go first
1982 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1983 rxKick();
1984
1985 txKick();
1986 }
1987
1988 void
1989 NSGigE::txDmaWriteCopy()
1990 {
1991 assert(txDmaState == dmaWriting);
1992
1993 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1994 txDmaState = dmaIdle;
1995
1996 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1997 txDmaAddr, txDmaLen);
1998 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1999 }
2000
2001 bool
2002 NSGigE::doTxDmaWrite()
2003 {
2004 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
2005 txDmaState = dmaWriting;
2006
2007 if (dmaInterface && !txDmaFree) {
2008 if (dmaInterface->busy())
2009 txDmaState = dmaWriteWaiting;
2010 else
2011 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
2012 &txDmaWriteEvent, true);
2013 return true;
2014 }
2015
2016 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
2017 txDmaWriteCopy();
2018 return false;
2019 }
2020
2021 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
2022 Tick start = curTick + dmaWriteDelay + factor;
2023 txDmaWriteEvent.schedule(start);
2024 return true;
2025 }
2026
2027 void
2028 NSGigE::txDmaWriteDone()
2029 {
2030 assert(txDmaState == dmaWriting);
2031 txDmaWriteCopy();
2032
2033 // If the receive state machine has a pending DMA, let it go first
2034 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
2035 rxKick();
2036
2037 txKick();
2038 }
2039
2040 void
2041 NSGigE::txKick()
2042 {
2043 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
2044
2045 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
2046 NsTxStateStrings[txState], is64bit ? 64 : 32);
2047
2048 Addr link, bufptr;
2049 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
2050 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
2051
2052 next:
2053 if (clock) {
2054 if (txKickTick > curTick) {
2055 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
2056 txKickTick);
2057 goto exit;
2058 }
2059
2060 // Go to the next state machine clock tick.
2061 txKickTick = curTick + cycles(1);
2062 }
2063
2064 switch(txDmaState) {
2065 case dmaReadWaiting:
2066 if (doTxDmaRead())
2067 goto exit;
2068 break;
2069 case dmaWriteWaiting:
2070 if (doTxDmaWrite())
2071 goto exit;
2072 break;
2073 default:
2074 break;
2075 }
2076
2077 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
2078 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
2079 switch (txState) {
2080 case txIdle:
2081 if (!txEnable) {
2082 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
2083 goto exit;
2084 }
2085
2086 if (CTDD) {
2087 txState = txDescRefr;
2088
2089 txDmaAddr = regs.txdp & 0x3fffffff;
2090 txDmaData =
2091 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
2092 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
2093 txDmaFree = dmaDescFree;
2094
2095 descDmaReads++;
2096 descDmaRdBytes += txDmaLen;
2097
2098 if (doTxDmaRead())
2099 goto exit;
2100
2101 } else {
2102 txState = txDescRead;
2103
2104 txDmaAddr = regs.txdp & 0x3fffffff;
2105 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2106 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2107 txDmaFree = dmaDescFree;
2108
2109 descDmaReads++;
2110 descDmaRdBytes += txDmaLen;
2111
2112 if (doTxDmaRead())
2113 goto exit;
2114 }
2115 break;
2116
2117 case txDescRefr:
2118 if (txDmaState != dmaIdle)
2119 goto exit;
2120
2121 txState = txAdvance;
2122 break;
2123
2124 case txDescRead:
2125 if (txDmaState != dmaIdle)
2126 goto exit;
2127
2128 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
2129 regs.txdp & 0x3fffffff);
2130 DPRINTF(EthernetDesc,
2131 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
2132 link, bufptr, cmdsts, extsts);
2133
2134 if (cmdsts & CMDSTS_OWN) {
2135 txState = txFifoBlock;
2136 txFragPtr = bufptr;
2137 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
2138 } else {
2139 devIntrPost(ISR_TXIDLE);
2140 txState = txIdle;
2141 goto exit;
2142 }
2143 break;
2144
2145 case txFifoBlock:
2146 if (!txPacket) {
2147 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2148 txPacket = new PacketData(16384);
2149 txPacketBufPtr = txPacket->data;
2150 }
2151
2152 if (txDescCnt == 0) {
2153 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2154 if (cmdsts & CMDSTS_MORE) {
2155 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2156 txState = txDescWrite;
2157
2158 cmdsts &= ~CMDSTS_OWN;
2159
2160 txDmaAddr = regs.txdp & 0x3fffffff;
2161 txDmaData = &cmdsts;
2162 if (is64bit) {
2163 txDmaAddr += offsetof(ns_desc64, cmdsts);
2164 txDmaLen = sizeof(txDesc64.cmdsts);
2165 } else {
2166 txDmaAddr += offsetof(ns_desc32, cmdsts);
2167 txDmaLen = sizeof(txDesc32.cmdsts);
2168 }
2169 txDmaFree = dmaDescFree;
2170
2171 if (doTxDmaWrite())
2172 goto exit;
2173
2174 } else { /* this packet is totally done */
2175 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2176 /* deal with the the packet that just finished */
2177 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2178 IpPtr ip(txPacket);
2179 if (extsts & EXTSTS_UDPPKT) {
2180 UdpPtr udp(ip);
2181 udp->sum(0);
2182 udp->sum(cksum(udp));
2183 txUdpChecksums++;
2184 } else if (extsts & EXTSTS_TCPPKT) {
2185 TcpPtr tcp(ip);
2186 tcp->sum(0);
2187 tcp->sum(cksum(tcp));
2188 txTcpChecksums++;
2189 }
2190 if (extsts & EXTSTS_IPPKT) {
2191 ip->sum(0);
2192 ip->sum(cksum(ip));
2193 txIpChecksums++;
2194 }
2195 }
2196
2197 txPacket->length = txPacketBufPtr - txPacket->data;
2198 // this is just because the receive can't handle a
2199 // packet bigger want to make sure
2200 if (txPacket->length > 1514)
2201 panic("transmit packet too large, %s > 1514\n",
2202 txPacket->length);
2203
2204 #ifndef NDEBUG
2205 bool success =
2206 #endif
2207 txFifo.push(txPacket);
2208 assert(success);
2209
2210 /*
2211 * this following section is not tqo spec, but
2212 * functionally shouldn't be any different. normally,
2213 * the chip will wait til the transmit has occurred
2214 * before writing back the descriptor because it has
2215 * to wait to see that it was successfully transmitted
2216 * to decide whether to set CMDSTS_OK or not.
2217 * however, in the simulator since it is always
2218 * successfully transmitted, and writing it exactly to
2219 * spec would complicate the code, we just do it here
2220 */
2221
2222 cmdsts &= ~CMDSTS_OWN;
2223 cmdsts |= CMDSTS_OK;
2224
2225 DPRINTF(EthernetDesc,
2226 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2227 cmdsts, extsts);
2228
2229 txDmaFree = dmaDescFree;
2230 txDmaAddr = regs.txdp & 0x3fffffff;
2231 txDmaData = &cmdsts;
2232 if (is64bit) {
2233 txDmaAddr += offsetof(ns_desc64, cmdsts);
2234 txDmaLen =
2235 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2236 } else {
2237 txDmaAddr += offsetof(ns_desc32, cmdsts);
2238 txDmaLen =
2239 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2240 }
2241
2242 descDmaWrites++;
2243 descDmaWrBytes += txDmaLen;
2244
2245 transmit();
2246 txPacket = 0;
2247
2248 if (!txEnable) {
2249 DPRINTF(EthernetSM, "halting TX state machine\n");
2250 txState = txIdle;
2251 goto exit;
2252 } else
2253 txState = txAdvance;
2254
2255 if (doTxDmaWrite())
2256 goto exit;
2257 }
2258 } else {
2259 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2260 if (!txFifo.full()) {
2261 txState = txFragRead;
2262
2263 /*
2264 * The number of bytes transferred is either whatever
2265 * is left in the descriptor (txDescCnt), or if there
2266 * is not enough room in the fifo, just whatever room
2267 * is left in the fifo
2268 */
2269 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2270
2271 txDmaAddr = txFragPtr & 0x3fffffff;
2272 txDmaData = txPacketBufPtr;
2273 txDmaLen = txXferLen;
2274 txDmaFree = dmaDataFree;
2275
2276 if (doTxDmaRead())
2277 goto exit;
2278 } else {
2279 txState = txFifoBlock;
2280 transmit();
2281
2282 goto exit;
2283 }
2284
2285 }
2286 break;
2287
2288 case txFragRead:
2289 if (txDmaState != dmaIdle)
2290 goto exit;
2291
2292 txPacketBufPtr += txXferLen;
2293 txFragPtr += txXferLen;
2294 txDescCnt -= txXferLen;
2295 txFifo.reserve(txXferLen);
2296
2297 txState = txFifoBlock;
2298 break;
2299
2300 case txDescWrite:
2301 if (txDmaState != dmaIdle)
2302 goto exit;
2303
2304 if (cmdsts & CMDSTS_INTR)
2305 devIntrPost(ISR_TXDESC);
2306
2307 if (!txEnable) {
2308 DPRINTF(EthernetSM, "halting TX state machine\n");
2309 txState = txIdle;
2310 goto exit;
2311 } else
2312 txState = txAdvance;
2313 break;
2314
2315 case txAdvance:
2316 if (link == 0) {
2317 devIntrPost(ISR_TXIDLE);
2318 txState = txIdle;
2319 goto exit;
2320 } else {
2321 if (txDmaState != dmaIdle)
2322 goto exit;
2323 txState = txDescRead;
2324 regs.txdp = link;
2325 CTDD = false;
2326
2327 txDmaAddr = link & 0x3fffffff;
2328 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2329 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2330 txDmaFree = dmaDescFree;
2331
2332 if (doTxDmaRead())
2333 goto exit;
2334 }
2335 break;
2336
2337 default:
2338 panic("invalid state");
2339 }
2340
2341 DPRINTF(EthernetSM, "entering next txState=%s\n",
2342 NsTxStateStrings[txState]);
2343 goto next;
2344
2345 exit:
2346 /**
2347 * @todo do we want to schedule a future kick?
2348 */
2349 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2350 NsTxStateStrings[txState]);
2351
2352 if (clock && !txKickEvent.scheduled())
2353 txKickEvent.schedule(txKickTick);
2354 }
2355
2356 /**
2357 * Advance the EEPROM state machine
2358 * Called on rising edge of EEPROM clock bit in MEAR
2359 */
void
NSGigE::eepromKick()
{
    // Bit-serial EEPROM protocol: each call consumes (or produces) one
    // bit via the MEAR_EEDI/MEAR_EEDO bits of the MEAR register.
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift in one opcode bit, MSB first.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in one address bit, MSB first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the perfect-match (MAC address) words are backed by
            // real data; each EEPROM word holds two bytes of the MAC.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        // Shift out one data bit, MSB first.
        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2456
2457 void
2458 NSGigE::transferDone()
2459 {
2460 if (txFifo.empty()) {
2461 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2462 return;
2463 }
2464
2465 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2466
2467 if (txEvent.scheduled())
2468 txEvent.reschedule(curTick + cycles(1));
2469 else
2470 txEvent.schedule(curTick + cycles(1));
2471 }
2472
2473 bool
2474 NSGigE::rxFilter(const PacketPtr &packet)
2475 {
2476 EthPtr eth = packet;
2477 bool drop = true;
2478 string type;
2479
2480 const EthAddr &dst = eth->dst();
2481 if (dst.unicast()) {
2482 // If we're accepting all unicast addresses
2483 if (acceptUnicast)
2484 drop = false;
2485
2486 // If we make a perfect match
2487 if (acceptPerfect && dst == rom.perfectMatch)
2488 drop = false;
2489
2490 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2491 drop = false;
2492
2493 } else if (dst.broadcast()) {
2494 // if we're accepting broadcasts
2495 if (acceptBroadcast)
2496 drop = false;
2497
2498 } else if (dst.multicast()) {
2499 // if we're accepting all multicasts
2500 if (acceptMulticast)
2501 drop = false;
2502
2503 // Multicast hashing faked - all packets accepted
2504 if (multicastHashEnable)
2505 drop = false;
2506 }
2507
2508 if (drop) {
2509 DPRINTF(Ethernet, "rxFilter drop\n");
2510 DDUMP(EthernetData, packet->data, packet->length);
2511 }
2512
2513 return drop;
2514 }
2515
/**
 * Accept a packet arriving from the wire.
 *
 * @param packet the incoming packet.
 * @return true if the packet was consumed (including deliberate
 *         drops); false only when the receive FIFO is full, which
 *         tells the link to retry later.
 */
bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    // Receive disabled: silently drop, but report consumed.
    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
                "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    // Apply the destination-address filter.
    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    // FIFO overrun: drop, raise RXORN, and ask the link to retry.
    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    // Queue the packet and poke the receive state machine.
    rxFifo.push(packet);

    rxKick();
    return true;
}
2564
2565 //=====================================================================
2566 //
2567 //
/**
 * Write the complete device state out to a checkpoint stream.
 *
 * The field order here is a wire format: unserialize() reads the values
 * back in exactly this order, so any addition or reordering must be
 * mirrored there.
 *
 * @param os checkpoint output stream
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.  Completing in-flight DMA copies
     * here means no DMA event needs to be checkpointed.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // Receive-filter ROM: perfect-match MAC address and multicast hash
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Side effect: txPacket->length is updated here to reflect how
        // much of the packet has been assembled so far.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Buffer pointers are saved as offsets from the packet data so
        // they can be rebuilt against the new allocation on restore.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors (both 64-bit and 32-bit layouts)
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum values go through a plain int so the checkpoint format does
    // not depend on the enum's underlying type.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later.  Note this is saved *relative* to curTick
     * (zero means no pending transmit).
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // NOTE(review): unlike transmitTick, this is saved as an absolute
    // tick, and intrEvent->scheduled() is not checked -- assumes a
    // non-null intrEvent is always scheduled; confirm.
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2746
2747 void
2748 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2749 {
2750 // Unserialize the PciDev base class
2751 PciDev::unserialize(cp, section);
2752
2753 UNSERIALIZE_SCALAR(regs.command);
2754 UNSERIALIZE_SCALAR(regs.config);
2755 UNSERIALIZE_SCALAR(regs.mear);
2756 UNSERIALIZE_SCALAR(regs.ptscr);
2757 UNSERIALIZE_SCALAR(regs.isr);
2758 UNSERIALIZE_SCALAR(regs.imr);
2759 UNSERIALIZE_SCALAR(regs.ier);
2760 UNSERIALIZE_SCALAR(regs.ihr);
2761 UNSERIALIZE_SCALAR(regs.txdp);
2762 UNSERIALIZE_SCALAR(regs.txdp_hi);
2763 UNSERIALIZE_SCALAR(regs.txcfg);
2764 UNSERIALIZE_SCALAR(regs.gpior);
2765 UNSERIALIZE_SCALAR(regs.rxdp);
2766 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2767 UNSERIALIZE_SCALAR(regs.rxcfg);
2768 UNSERIALIZE_SCALAR(regs.pqcr);
2769 UNSERIALIZE_SCALAR(regs.wcsr);
2770 UNSERIALIZE_SCALAR(regs.pcr);
2771 UNSERIALIZE_SCALAR(regs.rfcr);
2772 UNSERIALIZE_SCALAR(regs.rfdr);
2773 UNSERIALIZE_SCALAR(regs.brar);
2774 UNSERIALIZE_SCALAR(regs.brdr);
2775 UNSERIALIZE_SCALAR(regs.srr);
2776 UNSERIALIZE_SCALAR(regs.mibc);
2777 UNSERIALIZE_SCALAR(regs.vrcr);
2778 UNSERIALIZE_SCALAR(regs.vtcr);
2779 UNSERIALIZE_SCALAR(regs.vdr);
2780 UNSERIALIZE_SCALAR(regs.ccsr);
2781 UNSERIALIZE_SCALAR(regs.tbicr);
2782 UNSERIALIZE_SCALAR(regs.tbisr);
2783 UNSERIALIZE_SCALAR(regs.tanar);
2784 UNSERIALIZE_SCALAR(regs.tanlpar);
2785 UNSERIALIZE_SCALAR(regs.taner);
2786 UNSERIALIZE_SCALAR(regs.tesr);
2787
2788 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2789 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2790
2791 UNSERIALIZE_SCALAR(ioEnable);
2792
2793 /*
2794 * unserialize the data fifos
2795 */
2796 rxFifo.unserialize("rxFifo", cp, section);
2797 txFifo.unserialize("txFifo", cp, section);
2798
2799 /*
2800 * unserialize the various helper variables
2801 */
2802 bool txPacketExists;
2803 UNSERIALIZE_SCALAR(txPacketExists);
2804 if (txPacketExists) {
2805 txPacket = new PacketData(16384);
2806 txPacket->unserialize("txPacket", cp, section);
2807 uint32_t txPktBufPtr;
2808 UNSERIALIZE_SCALAR(txPktBufPtr);
2809 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2810 } else
2811 txPacket = 0;
2812
2813 bool rxPacketExists;
2814 UNSERIALIZE_SCALAR(rxPacketExists);
2815 rxPacket = 0;
2816 if (rxPacketExists) {
2817 rxPacket = new PacketData(16384);
2818 rxPacket->unserialize("rxPacket", cp, section);
2819 uint32_t rxPktBufPtr;
2820 UNSERIALIZE_SCALAR(rxPktBufPtr);
2821 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2822 } else
2823 rxPacket = 0;
2824
2825 UNSERIALIZE_SCALAR(txXferLen);
2826 UNSERIALIZE_SCALAR(rxXferLen);
2827
2828 /*
2829 * Unserialize Cached Descriptors
2830 */
2831 UNSERIALIZE_SCALAR(rxDesc64.link);
2832 UNSERIALIZE_SCALAR(rxDesc64.bufptr);
2833 UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
2834 UNSERIALIZE_SCALAR(rxDesc64.extsts);
2835 UNSERIALIZE_SCALAR(txDesc64.link);
2836 UNSERIALIZE_SCALAR(txDesc64.bufptr);
2837 UNSERIALIZE_SCALAR(txDesc64.cmdsts);
2838 UNSERIALIZE_SCALAR(txDesc64.extsts);
2839 UNSERIALIZE_SCALAR(rxDesc32.link);
2840 UNSERIALIZE_SCALAR(rxDesc32.bufptr);
2841 UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
2842 UNSERIALIZE_SCALAR(rxDesc32.extsts);
2843 UNSERIALIZE_SCALAR(txDesc32.link);
2844 UNSERIALIZE_SCALAR(txDesc32.bufptr);
2845 UNSERIALIZE_SCALAR(txDesc32.cmdsts);
2846 UNSERIALIZE_SCALAR(txDesc32.extsts);
2847 UNSERIALIZE_SCALAR(extstsEnable);
2848
2849 /*
2850 * unserialize tx state machine
2851 */
2852 int txState;
2853 UNSERIALIZE_SCALAR(txState);
2854 this->txState = (TxState) txState;
2855 UNSERIALIZE_SCALAR(txEnable);
2856 UNSERIALIZE_SCALAR(CTDD);
2857 UNSERIALIZE_SCALAR(txFragPtr);
2858 UNSERIALIZE_SCALAR(txDescCnt);
2859 int txDmaState;
2860 UNSERIALIZE_SCALAR(txDmaState);
2861 this->txDmaState = (DmaState) txDmaState;
2862 UNSERIALIZE_SCALAR(txKickTick);
2863 if (txKickTick)
2864 txKickEvent.schedule(txKickTick);
2865
2866 /*
2867 * unserialize rx state machine
2868 */
2869 int rxState;
2870 UNSERIALIZE_SCALAR(rxState);
2871 this->rxState = (RxState) rxState;
2872 UNSERIALIZE_SCALAR(rxEnable);
2873 UNSERIALIZE_SCALAR(CRDD);
2874 UNSERIALIZE_SCALAR(rxPktBytes);
2875 UNSERIALIZE_SCALAR(rxFragPtr);
2876 UNSERIALIZE_SCALAR(rxDescCnt);
2877 int rxDmaState;
2878 UNSERIALIZE_SCALAR(rxDmaState);
2879 this->rxDmaState = (DmaState) rxDmaState;
2880 UNSERIALIZE_SCALAR(rxKickTick);
2881 if (rxKickTick)
2882 rxKickEvent.schedule(rxKickTick);
2883
2884 /*
2885 * Unserialize EEPROM state machine
2886 */
2887 int eepromState;
2888 UNSERIALIZE_SCALAR(eepromState);
2889 this->eepromState = (EEPROMState) eepromState;
2890 UNSERIALIZE_SCALAR(eepromClk);
2891 UNSERIALIZE_SCALAR(eepromBitsToRx);
2892 UNSERIALIZE_SCALAR(eepromOpcode);
2893 UNSERIALIZE_SCALAR(eepromAddress);
2894 UNSERIALIZE_SCALAR(eepromData);
2895
2896 /*
2897 * If there's a pending transmit, reschedule it now
2898 */
2899 Tick transmitTick;
2900 UNSERIALIZE_SCALAR(transmitTick);
2901 if (transmitTick)
2902 txEvent.schedule(curTick + transmitTick);
2903
2904 /*
2905 * unserialize receive address filter settings
2906 */
2907 UNSERIALIZE_SCALAR(rxFilterEnable);
2908 UNSERIALIZE_SCALAR(acceptBroadcast);
2909 UNSERIALIZE_SCALAR(acceptMulticast);
2910 UNSERIALIZE_SCALAR(acceptUnicast);
2911 UNSERIALIZE_SCALAR(acceptPerfect);
2912 UNSERIALIZE_SCALAR(acceptArp);
2913 UNSERIALIZE_SCALAR(multicastHashEnable);
2914
2915 /*
2916 * Keep track of pending interrupt status.
2917 */
2918 UNSERIALIZE_SCALAR(intrTick);
2919 UNSERIALIZE_SCALAR(cpuPendingIntr);
2920 Tick intrEventTick;
2921 UNSERIALIZE_SCALAR(intrEventTick);
2922 if (intrEventTick) {
2923 intrEvent = new IntrEvent(this, true);
2924 intrEvent->schedule(intrEventTick);
2925 }
2926
2927 /*
2928 * re-add addrRanges to bus bridges
2929 */
2930 if (pioInterface) {
2931 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
2932 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
2933 }
2934 }
2935
2936 Tick
2937 NSGigE::cacheAccess(MemReqPtr &req)
2938 {
2939 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2940 req->paddr, req->paddr & 0xfff);
2941
2942 return curTick + pioLatency;
2943 }
2944
//
// Simulator configuration parameters for the NSGigEInt interface object.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // controller this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    // Descriptions below are the user-visible parameter documentation.
    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2958
2959 CREATE_SIM_OBJECT(NSGigEInt)
2960 {
2961 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2962
2963 EtherInt *p = (EtherInt *)peer;
2964 if (p) {
2965 dev_int->setPeer(p);
2966 p->setPeer(dev_int);
2967 }
2968
2969 return dev_int;
2970 }
2971
2972 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2973
2974
//
// Simulator configuration parameters for the NSGigE device model.
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // State machine clock
    Param<Tick> clock;

    // PCI identity and memory-system plumbing
    Param<Addr> addr;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // Bus attachments and DMA/PIO timing knobs
    SimObjectParam<HierParams *> hier;
    SimObjectParam<Bus*> pio_bus;
    SimObjectParam<Bus*> dma_bus;
    SimObjectParam<Bus*> payload_bus;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> pio_latency;
    Param<Tick> intr_delay;

    // Link delays and FIFO sizing
    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> tx_fifo_size;

    // Receive filtering and MAC address
    Param<bool> rx_filter;
    Param<string> hardware_address;
    Param<bool> rx_thread;
    Param<bool> tx_thread;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
3014
3015 BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)
3016
3017 INIT_PARAM(clock, "State machine processor frequency"),
3018
3019 INIT_PARAM(addr, "Device Address"),
3020 INIT_PARAM(mmu, "Memory Controller"),
3021 INIT_PARAM(physmem, "Physical Memory"),
3022 INIT_PARAM(configspace, "PCI Configspace"),
3023 INIT_PARAM(configdata, "PCI Config data"),
3024 INIT_PARAM(platform, "Platform"),
3025 INIT_PARAM(pci_bus, "PCI bus"),
3026 INIT_PARAM(pci_dev, "PCI device number"),
3027 INIT_PARAM(pci_func, "PCI function code"),
3028
3029 INIT_PARAM(hier, "Hierarchy global variables"),
3030 INIT_PARAM(pio_bus, ""),
3031 INIT_PARAM(dma_bus, ""),
3032 INIT_PARAM(payload_bus, "The IO Bus to attach to for payload"),
3033 INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
3034 INIT_PARAM(dma_data_free, "DMA of Data is free"),
3035 INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
3036 INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
3037 INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
3038 INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
3039 INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
3040 INIT_PARAM(pio_latency, "Programmed IO latency in bus cycles"),
3041 INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
3042
3043 INIT_PARAM(rx_delay, "Receive Delay"),
3044 INIT_PARAM(tx_delay, "Transmit Delay"),
3045 INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
3046 INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),
3047
3048 INIT_PARAM(rx_filter, "Enable Receive Filter"),
3049 INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
3050 INIT_PARAM(rx_thread, ""),
3051 INIT_PARAM(tx_thread, "")
3052
3053 END_INIT_SIM_OBJECT_PARAMS(NSGigE)
3054
3055
// Factory: copy the configuration parameters into an NSGigE::Params
// struct and construct the device from it.
CREATE_SIM_OBJECT(NSGigE)
{
    NSGigE::Params *params = new NSGigE::Params;

    params->name = getInstanceName();

    params->clock = clock;

    // PCI identity and memory-system plumbing
    params->mmu = mmu;
    params->pmem = physmem;
    params->configSpace = configspace;
    params->configData = configdata;
    params->plat = platform;
    params->busNum = pci_bus;
    params->deviceNum = pci_dev;
    params->functionNum = pci_func;

    // Bus attachments; note the rename: the dma_bus parameter feeds the
    // header_bus field.
    params->hier = hier;
    params->pio_bus = pio_bus;
    params->header_bus = dma_bus;
    params->payload_bus = payload_bus;
    params->dma_desc_free = dma_desc_free;
    params->dma_data_free = dma_data_free;
    params->dma_read_delay = dma_read_delay;
    params->dma_write_delay = dma_write_delay;
    params->dma_read_factor = dma_read_factor;
    params->dma_write_factor = dma_write_factor;
    params->dma_no_allocate = dma_no_allocate;
    params->pio_latency = pio_latency;
    params->intr_delay = intr_delay;

    // Link delays and FIFO sizing
    params->rx_delay = rx_delay;
    params->tx_delay = tx_delay;
    params->rx_fifo_size = rx_fifo_size;
    params->tx_fifo_size = tx_fifo_size;

    // Receive filtering and MAC address
    params->rx_filter = rx_filter;
    params->eaddr = hardware_address;
    params->rx_thread = rx_thread;
    params->tx_thread = tx_thread;

    return new NSGigE(params);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)