Shuffle around device names to make things easier to read.
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
53
// Human-readable names for the receive state machine states, indexed by
// the rx state enum; used only for debug tracing.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};

// Human-readable names for the transmit state machine states, indexed by
// the tx state enum; used only for debug tracing.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};

// Human-readable names for the DMA engine states, indexed by the dma
// state enum; used only for debug tracing.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
84
85 using namespace std;
86 using namespace Net;
87
88 ///////////////////////////////////////////////////////////////////////
89 //
90 // NSGigE PCI Device
91 //
/**
 * Construct the simulated DP83820 device: initialize all state machine
 * and filter state to its idle/disabled defaults, wire the device to the
 * memory system buses, reset the device registers, and load the station
 * (MAC) address into the perfect-match ROM.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // PIO (register) accesses use the header bus when one is supplied;
    // DMA traffic uses the payload bus when present, otherwise it shares
    // the header bus.
    if (p->header_bus) {
        pioInterface = newPioInterface(name() + ".pio", p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRate;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus) {
        // No header bus: both PIO and DMA go over the payload bus.
        pioInterface = newPioInterface(name() + ".pio2", p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRate;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1,
                                             p->dma_no_allocate);
    }


    // Latency/throughput parameters for the interrupt and DMA models.
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // Bring the register file to its hardware-reset values and store the
    // configured ethernet address where the filter logic expects it.
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);

    // Clear the cached rx/tx descriptor copies (both 32- and 64-bit forms).
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
157
// Nothing to release explicitly; member objects clean up on destruction.
NSGigE::~NSGigE()
{}
160
161 void
162 NSGigE::regStats()
163 {
164 txBytes
165 .name(name() + ".txBytes")
166 .desc("Bytes Transmitted")
167 .prereq(txBytes)
168 ;
169
170 rxBytes
171 .name(name() + ".rxBytes")
172 .desc("Bytes Received")
173 .prereq(rxBytes)
174 ;
175
176 txPackets
177 .name(name() + ".txPackets")
178 .desc("Number of Packets Transmitted")
179 .prereq(txBytes)
180 ;
181
182 rxPackets
183 .name(name() + ".rxPackets")
184 .desc("Number of Packets Received")
185 .prereq(rxBytes)
186 ;
187
188 txIpChecksums
189 .name(name() + ".txIpChecksums")
190 .desc("Number of tx IP Checksums done by device")
191 .precision(0)
192 .prereq(txBytes)
193 ;
194
195 rxIpChecksums
196 .name(name() + ".rxIpChecksums")
197 .desc("Number of rx IP Checksums done by device")
198 .precision(0)
199 .prereq(rxBytes)
200 ;
201
202 txTcpChecksums
203 .name(name() + ".txTcpChecksums")
204 .desc("Number of tx TCP Checksums done by device")
205 .precision(0)
206 .prereq(txBytes)
207 ;
208
209 rxTcpChecksums
210 .name(name() + ".rxTcpChecksums")
211 .desc("Number of rx TCP Checksums done by device")
212 .precision(0)
213 .prereq(rxBytes)
214 ;
215
216 txUdpChecksums
217 .name(name() + ".txUdpChecksums")
218 .desc("Number of tx UDP Checksums done by device")
219 .precision(0)
220 .prereq(txBytes)
221 ;
222
223 rxUdpChecksums
224 .name(name() + ".rxUdpChecksums")
225 .desc("Number of rx UDP Checksums done by device")
226 .precision(0)
227 .prereq(rxBytes)
228 ;
229
230 descDmaReads
231 .name(name() + ".descDMAReads")
232 .desc("Number of descriptors the device read w/ DMA")
233 .precision(0)
234 ;
235
236 descDmaWrites
237 .name(name() + ".descDMAWrites")
238 .desc("Number of descriptors the device wrote w/ DMA")
239 .precision(0)
240 ;
241
242 descDmaRdBytes
243 .name(name() + ".descDmaReadBytes")
244 .desc("number of descriptor bytes read w/ DMA")
245 .precision(0)
246 ;
247
248 descDmaWrBytes
249 .name(name() + ".descDmaWriteBytes")
250 .desc("number of descriptor bytes write w/ DMA")
251 .precision(0)
252 ;
253
254 txBandwidth
255 .name(name() + ".txBandwidth")
256 .desc("Transmit Bandwidth (bits/s)")
257 .precision(0)
258 .prereq(txBytes)
259 ;
260
261 rxBandwidth
262 .name(name() + ".rxBandwidth")
263 .desc("Receive Bandwidth (bits/s)")
264 .precision(0)
265 .prereq(rxBytes)
266 ;
267
268 totBandwidth
269 .name(name() + ".totBandwidth")
270 .desc("Total Bandwidth (bits/s)")
271 .precision(0)
272 .prereq(totBytes)
273 ;
274
275 totPackets
276 .name(name() + ".totPackets")
277 .desc("Total Packets")
278 .precision(0)
279 .prereq(totBytes)
280 ;
281
282 totBytes
283 .name(name() + ".totBytes")
284 .desc("Total Bytes")
285 .precision(0)
286 .prereq(totBytes)
287 ;
288
289 totPacketRate
290 .name(name() + ".totPPS")
291 .desc("Total Tranmission Rate (packets/s)")
292 .precision(0)
293 .prereq(totBytes)
294 ;
295
296 txPacketRate
297 .name(name() + ".txPPS")
298 .desc("Packet Tranmission Rate (packets/s)")
299 .precision(0)
300 .prereq(txBytes)
301 ;
302
303 rxPacketRate
304 .name(name() + ".rxPPS")
305 .desc("Packet Reception Rate (packets/s)")
306 .precision(0)
307 .prereq(rxBytes)
308 ;
309
310 postedSwi
311 .name(name() + ".postedSwi")
312 .desc("number of software interrupts posted to CPU")
313 .precision(0)
314 ;
315
316 totalSwi
317 .name(name() + ".totalSwi")
318 .desc("total number of Swi written to ISR")
319 .precision(0)
320 ;
321
322 coalescedSwi
323 .name(name() + ".coalescedSwi")
324 .desc("average number of Swi's coalesced into each post")
325 .precision(0)
326 ;
327
328 postedRxIdle
329 .name(name() + ".postedRxIdle")
330 .desc("number of rxIdle interrupts posted to CPU")
331 .precision(0)
332 ;
333
334 totalRxIdle
335 .name(name() + ".totalRxIdle")
336 .desc("total number of RxIdle written to ISR")
337 .precision(0)
338 ;
339
340 coalescedRxIdle
341 .name(name() + ".coalescedRxIdle")
342 .desc("average number of RxIdle's coalesced into each post")
343 .precision(0)
344 ;
345
346 postedRxOk
347 .name(name() + ".postedRxOk")
348 .desc("number of RxOk interrupts posted to CPU")
349 .precision(0)
350 ;
351
352 totalRxOk
353 .name(name() + ".totalRxOk")
354 .desc("total number of RxOk written to ISR")
355 .precision(0)
356 ;
357
358 coalescedRxOk
359 .name(name() + ".coalescedRxOk")
360 .desc("average number of RxOk's coalesced into each post")
361 .precision(0)
362 ;
363
364 postedRxDesc
365 .name(name() + ".postedRxDesc")
366 .desc("number of RxDesc interrupts posted to CPU")
367 .precision(0)
368 ;
369
370 totalRxDesc
371 .name(name() + ".totalRxDesc")
372 .desc("total number of RxDesc written to ISR")
373 .precision(0)
374 ;
375
376 coalescedRxDesc
377 .name(name() + ".coalescedRxDesc")
378 .desc("average number of RxDesc's coalesced into each post")
379 .precision(0)
380 ;
381
382 postedTxOk
383 .name(name() + ".postedTxOk")
384 .desc("number of TxOk interrupts posted to CPU")
385 .precision(0)
386 ;
387
388 totalTxOk
389 .name(name() + ".totalTxOk")
390 .desc("total number of TxOk written to ISR")
391 .precision(0)
392 ;
393
394 coalescedTxOk
395 .name(name() + ".coalescedTxOk")
396 .desc("average number of TxOk's coalesced into each post")
397 .precision(0)
398 ;
399
400 postedTxIdle
401 .name(name() + ".postedTxIdle")
402 .desc("number of TxIdle interrupts posted to CPU")
403 .precision(0)
404 ;
405
406 totalTxIdle
407 .name(name() + ".totalTxIdle")
408 .desc("total number of TxIdle written to ISR")
409 .precision(0)
410 ;
411
412 coalescedTxIdle
413 .name(name() + ".coalescedTxIdle")
414 .desc("average number of TxIdle's coalesced into each post")
415 .precision(0)
416 ;
417
418 postedTxDesc
419 .name(name() + ".postedTxDesc")
420 .desc("number of TxDesc interrupts posted to CPU")
421 .precision(0)
422 ;
423
424 totalTxDesc
425 .name(name() + ".totalTxDesc")
426 .desc("total number of TxDesc written to ISR")
427 .precision(0)
428 ;
429
430 coalescedTxDesc
431 .name(name() + ".coalescedTxDesc")
432 .desc("average number of TxDesc's coalesced into each post")
433 .precision(0)
434 ;
435
436 postedRxOrn
437 .name(name() + ".postedRxOrn")
438 .desc("number of RxOrn posted to CPU")
439 .precision(0)
440 ;
441
442 totalRxOrn
443 .name(name() + ".totalRxOrn")
444 .desc("total number of RxOrn written to ISR")
445 .precision(0)
446 ;
447
448 coalescedRxOrn
449 .name(name() + ".coalescedRxOrn")
450 .desc("average number of RxOrn's coalesced into each post")
451 .precision(0)
452 ;
453
454 coalescedTotal
455 .name(name() + ".coalescedTotal")
456 .desc("average number of interrupts coalesced into each post")
457 .precision(0)
458 ;
459
460 postedInterrupts
461 .name(name() + ".postedInterrupts")
462 .desc("number of posts to CPU")
463 .precision(0)
464 ;
465
466 droppedPackets
467 .name(name() + ".droppedPackets")
468 .desc("number of packets dropped")
469 .precision(0)
470 ;
471
472 coalescedSwi = totalSwi / postedInterrupts;
473 coalescedRxIdle = totalRxIdle / postedInterrupts;
474 coalescedRxOk = totalRxOk / postedInterrupts;
475 coalescedRxDesc = totalRxDesc / postedInterrupts;
476 coalescedTxOk = totalTxOk / postedInterrupts;
477 coalescedTxIdle = totalTxIdle / postedInterrupts;
478 coalescedTxDesc = totalTxDesc / postedInterrupts;
479 coalescedRxOrn = totalRxOrn / postedInterrupts;
480
481 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
482 totalTxOk + totalTxIdle + totalTxDesc +
483 totalRxOrn) / postedInterrupts;
484
485 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
486 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
487 totBandwidth = txBandwidth + rxBandwidth;
488 totBytes = txBytes + rxBytes;
489 totPackets = txPackets + rxPackets;
490
491 txPacketRate = txPackets / simSeconds;
492 rxPacketRate = rxPackets / simSeconds;
493 }
494
495 /**
496 * This is to read the PCI general configuration registers
497 */
498 void
499 NSGigE::readConfig(int offset, int size, uint8_t *data)
500 {
501 if (offset < PCI_DEVICE_SPECIFIC)
502 PciDev::readConfig(offset, size, data);
503 else
504 panic("Device specific PCI config space not implemented!\n");
505 }
506
/**
 * This is to write to the PCI general configuration registers.
 * Delegates the actual write to the PciDev base class, then watches for
 * side effects the device model cares about: the I/O-space enable bit in
 * the command register and updates to the two base address registers.
 */
void
NSGigE::writeConfig(int offset, int size, const uint8_t* data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track the I/O Space Enable bit; read()/write() assert on it.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            // NOTE(review): the address range is registered BEFORE the
            // uncached-space mask is applied below — presumably the PIO
            // interface wants the unmasked address; confirm against the
            // bus/PIO interface code.
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
564
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.  Only 32-bit accesses to the register file are supported;
 * reads of reserved registers or with other sizes panic.
 *
 * @param req  memory request carrying the physical address and size
 * @param data buffer the register value is written into
 * @return No_Fault on success (panics otherwise)
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // offsets above the reserved area alias PCI config space
        readConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;
            uint16_t rfaddr;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // the low bits of RFCR select which filter-ROM word
                // a subsequent RFDR access refers to
                rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
                switch (rfaddr) {
                  // Read from perfect match ROM octets
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    // Read filter hash table
                    if (rfaddr >= FHASH_ADDR &&
                        rfaddr < FHASH_ADDR + FHASH_SIZE) {

                        // Only word-aligned reads supported
                        if (rfaddr % 2)
                            panic("unaligned read from filter hash table!");

                        reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                        reg += rom.filterHash[rfaddr - FHASH_ADDR];
                        break;
                    }

                    panic("reading RFDR for something other than pattern"
                          " matching or hashing! %#x\n", rfaddr);
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // these status/clear bits read back as zero
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // simulator-private register: reports m5 configuration
                // flags (currently only whether this NIC is dedicated)
                reg = 0;
                if (params()->dedicated)
                    reg |= M5REG_DEDICATED;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
795
796 Fault
797 NSGigE::write(MemReqPtr &req, const uint8_t *data)
798 {
799 assert(ioEnable);
800
801 Addr daddr = req->paddr & 0xfff;
802 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
803 daddr, req->paddr, req->vaddr, req->size);
804
805 if (daddr > LAST && daddr <= RESERVED) {
806 panic("Accessing reserved register");
807 } else if (daddr > RESERVED && daddr <= 0x3FC) {
808 writeConfig(daddr & 0xff, req->size, data);
809 return No_Fault;
810 } else if (daddr > 0x3FC)
811 panic("Something is messed up!\n");
812
813 if (req->size == sizeof(uint32_t)) {
814 uint32_t reg = *(uint32_t *)data;
815 uint16_t rfaddr;
816
817 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
818
819 switch (daddr) {
820 case CR:
821 regs.command = reg;
822 if (reg & CR_TXD) {
823 txEnable = false;
824 } else if (reg & CR_TXE) {
825 txEnable = true;
826
827 // the kernel is enabling the transmit machine
828 if (txState == txIdle)
829 txKick();
830 }
831
832 if (reg & CR_RXD) {
833 rxEnable = false;
834 } else if (reg & CR_RXE) {
835 rxEnable = true;
836
837 if (rxState == rxIdle)
838 rxKick();
839 }
840
841 if (reg & CR_TXR)
842 txReset();
843
844 if (reg & CR_RXR)
845 rxReset();
846
847 if (reg & CR_SWI)
848 devIntrPost(ISR_SWI);
849
850 if (reg & CR_RST) {
851 txReset();
852 rxReset();
853
854 regsReset();
855 }
856 break;
857
858 case CFGR:
859 if (reg & CFGR_LNKSTS ||
860 reg & CFGR_SPDSTS ||
861 reg & CFGR_DUPSTS ||
862 reg & CFGR_RESERVED ||
863 reg & CFGR_T64ADDR ||
864 reg & CFGR_PCI64_DET)
865
866 // First clear all writable bits
867 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
868 CFGR_RESERVED | CFGR_T64ADDR |
869 CFGR_PCI64_DET;
870 // Now set the appropriate writable bits
871 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
872 CFGR_RESERVED | CFGR_T64ADDR |
873 CFGR_PCI64_DET);
874
875 // all these #if 0's are because i don't THINK the kernel needs to
876 // have these implemented. if there is a problem relating to one of
877 // these, you may need to add functionality in.
878 if (reg & CFGR_TBI_EN) ;
879 if (reg & CFGR_MODE_1000) ;
880
881 if (reg & CFGR_AUTO_1000)
882 panic("CFGR_AUTO_1000 not implemented!\n");
883
884 if (reg & CFGR_PINT_DUPSTS ||
885 reg & CFGR_PINT_LNKSTS ||
886 reg & CFGR_PINT_SPDSTS)
887 ;
888
889 if (reg & CFGR_TMRTEST) ;
890 if (reg & CFGR_MRM_DIS) ;
891 if (reg & CFGR_MWI_DIS) ;
892
893 if (reg & CFGR_T64ADDR) ;
894 // panic("CFGR_T64ADDR is read only register!\n");
895
896 if (reg & CFGR_PCI64_DET)
897 panic("CFGR_PCI64_DET is read only register!\n");
898
899 if (reg & CFGR_DATA64_EN) ;
900 if (reg & CFGR_M64ADDR) ;
901 if (reg & CFGR_PHY_RST) ;
902 if (reg & CFGR_PHY_DIS) ;
903
904 if (reg & CFGR_EXTSTS_EN)
905 extstsEnable = true;
906 else
907 extstsEnable = false;
908
909 if (reg & CFGR_REQALG) ;
910 if (reg & CFGR_SB) ;
911 if (reg & CFGR_POW) ;
912 if (reg & CFGR_EXD) ;
913 if (reg & CFGR_PESEL) ;
914 if (reg & CFGR_BROM_DIS) ;
915 if (reg & CFGR_EXT_125) ;
916 if (reg & CFGR_BEM) ;
917 break;
918
919 case MEAR:
920 // Clear writable bits
921 regs.mear &= MEAR_EEDO;
922 // Set appropriate writable bits
923 regs.mear |= reg & ~MEAR_EEDO;
924
925 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
926 // even though it could get it through RFDR
927 if (reg & MEAR_EESEL) {
928 // Rising edge of clock
929 if (reg & MEAR_EECLK && !eepromClk)
930 eepromKick();
931 }
932 else {
933 eepromState = eepromStart;
934 regs.mear &= ~MEAR_EEDI;
935 }
936
937 eepromClk = reg & MEAR_EECLK;
938
939 // since phy is completely faked, MEAR_MD* don't matter
940 if (reg & MEAR_MDIO) ;
941 if (reg & MEAR_MDDIR) ;
942 if (reg & MEAR_MDC) ;
943 break;
944
945 case PTSCR:
946 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
947 // these control BISTs for various parts of chip - we
948 // don't care or do just fake that the BIST is done
949 if (reg & PTSCR_RBIST_EN)
950 regs.ptscr |= PTSCR_RBIST_DONE;
951 if (reg & PTSCR_EEBIST_EN)
952 regs.ptscr &= ~PTSCR_EEBIST_EN;
953 if (reg & PTSCR_EELOAD_EN)
954 regs.ptscr &= ~PTSCR_EELOAD_EN;
955 break;
956
957 case ISR: /* writing to the ISR has no effect */
958 panic("ISR is a read only register!\n");
959
960 case IMR:
961 regs.imr = reg;
962 devIntrChangeMask();
963 break;
964
965 case IER:
966 regs.ier = reg;
967 break;
968
969 case IHR:
970 regs.ihr = reg;
971 /* not going to implement real interrupt holdoff */
972 break;
973
974 case TXDP:
975 regs.txdp = (reg & 0xFFFFFFFC);
976 assert(txState == txIdle);
977 CTDD = false;
978 break;
979
980 case TXDP_HI:
981 regs.txdp_hi = reg;
982 break;
983
984 case TX_CFG:
985 regs.txcfg = reg;
986 #if 0
987 if (reg & TX_CFG_CSI) ;
988 if (reg & TX_CFG_HBI) ;
989 if (reg & TX_CFG_MLB) ;
990 if (reg & TX_CFG_ATP) ;
991 if (reg & TX_CFG_ECRETRY) {
992 /*
993 * this could easily be implemented, but considering
994 * the network is just a fake pipe, wouldn't make
995 * sense to do this
996 */
997 }
998
999 if (reg & TX_CFG_BRST_DIS) ;
1000 #endif
1001
1002 #if 0
1003 /* we handle our own DMA, ignore the kernel's exhortations */
1004 if (reg & TX_CFG_MXDMA) ;
1005 #endif
1006
1007 // also, we currently don't care about fill/drain
1008 // thresholds though this may change in the future with
1009 // more realistic networks or a driver which changes it
1010 // according to feedback
1011
1012 break;
1013
1014 case GPIOR:
1015 // Only write writable bits
1016 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1017 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
1018 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1019 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
1020 /* these just control general purpose i/o pins, don't matter */
1021 break;
1022
1023 case RXDP:
1024 regs.rxdp = reg;
1025 CRDD = false;
1026 break;
1027
1028 case RXDP_HI:
1029 regs.rxdp_hi = reg;
1030 break;
1031
1032 case RX_CFG:
1033 regs.rxcfg = reg;
1034 #if 0
1035 if (reg & RX_CFG_AEP) ;
1036 if (reg & RX_CFG_ARP) ;
1037 if (reg & RX_CFG_STRIPCRC) ;
1038 if (reg & RX_CFG_RX_RD) ;
1039 if (reg & RX_CFG_ALP) ;
1040 if (reg & RX_CFG_AIRL) ;
1041
1042 /* we handle our own DMA, ignore what kernel says about it */
1043 if (reg & RX_CFG_MXDMA) ;
1044
1045 //also, we currently don't care about fill/drain thresholds
1046 //though this may change in the future with more realistic
1047 //networks or a driver which changes it according to feedback
1048 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1049 #endif
1050 break;
1051
1052 case PQCR:
1053 /* there is no priority queueing used in the linux 2.6 driver */
1054 regs.pqcr = reg;
1055 break;
1056
1057 case WCSR:
1058 /* not going to implement wake on LAN */
1059 regs.wcsr = reg;
1060 break;
1061
1062 case PCR:
1063 /* not going to implement pause control */
1064 regs.pcr = reg;
1065 break;
1066
1067 case RFCR:
1068 regs.rfcr = reg;
1069
1070 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1071 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1072 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1073 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1074 acceptPerfect = (reg & RFCR_APM) ? true : false;
1075 acceptArp = (reg & RFCR_AARP) ? true : false;
1076 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1077
1078 #if 0
1079 if (reg & RFCR_APAT)
1080 panic("RFCR_APAT not implemented!\n");
1081 #endif
1082 if (reg & RFCR_UHEN)
1083 panic("Unicast hash filtering not used by drivers!\n");
1084
1085 if (reg & RFCR_ULM)
1086 panic("RFCR_ULM not implemented!\n");
1087
1088 break;
1089
1090 case RFDR:
1091 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1092 switch (rfaddr) {
1093 case 0x000:
1094 rom.perfectMatch[0] = (uint8_t)reg;
1095 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1096 break;
1097 case 0x002:
1098 rom.perfectMatch[2] = (uint8_t)reg;
1099 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1100 break;
1101 case 0x004:
1102 rom.perfectMatch[4] = (uint8_t)reg;
1103 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1104 break;
1105 default:
1106
1107 if (rfaddr >= FHASH_ADDR &&
1108 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1109
1110 // Only word-aligned writes supported
1111 if (rfaddr % 2)
1112 panic("unaligned write to filter hash table!");
1113
1114 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1115 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1116 = (uint8_t)(reg >> 8);
1117 break;
1118 }
1119 panic("writing RFDR for something other than pattern matching\
1120 or hashing! %#x\n", rfaddr);
1121 }
1122
1123 case BRAR:
1124 regs.brar = reg;
1125 break;
1126
1127 case BRDR:
1128 panic("the driver never uses BRDR, something is wrong!\n");
1129
1130 case SRR:
1131 panic("SRR is read only register!\n");
1132
1133 case MIBC:
1134 panic("the driver never uses MIBC, something is wrong!\n");
1135
1136 case VRCR:
1137 regs.vrcr = reg;
1138 break;
1139
1140 case VTCR:
1141 regs.vtcr = reg;
1142 break;
1143
1144 case VDR:
1145 panic("the driver never uses VDR, something is wrong!\n");
1146
1147 case CCSR:
1148 /* not going to implement clockrun stuff */
1149 regs.ccsr = reg;
1150 break;
1151
1152 case TBICR:
1153 regs.tbicr = reg;
1154 if (reg & TBICR_MR_LOOPBACK)
1155 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1156
1157 if (reg & TBICR_MR_AN_ENABLE) {
1158 regs.tanlpar = regs.tanar;
1159 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1160 }
1161
1162 #if 0
1163 if (reg & TBICR_MR_RESTART_AN) ;
1164 #endif
1165
1166 break;
1167
1168 case TBISR:
1169 panic("TBISR is read only register!\n");
1170
1171 case TANAR:
1172 // Only write the writable bits
1173 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1174 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1175
1176 // Pause capability unimplemented
1177 #if 0
1178 if (reg & TANAR_PS2) ;
1179 if (reg & TANAR_PS1) ;
1180 #endif
1181
1182 break;
1183
1184 case TANLPAR:
1185 panic("this should only be written to by the fake phy!\n");
1186
1187 case TANER:
1188 panic("TANER is read only register!\n");
1189
1190 case TESR:
1191 regs.tesr = reg;
1192 break;
1193
1194 default:
1195 panic("invalid register access daddr=%#x", daddr);
1196 }
1197 } else {
1198 panic("Invalid Request Size");
1199 }
1200
1201 return No_Fault;
1202 }
1203
/**
 * Assert bits in the device's interrupt status register (ISR), update
 * the per-source interrupt statistics, and — if anything is now both
 * pending and unmasked — schedule the actual CPU interrupt posting.
 *
 * @param interrupts bitmask of ISR_* bits to assert
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    // only implemented interrupt bits ever land in the ISR
    interrupts &= ISR_IMPL;
    regs.isr |= interrupts;

    // count each unmasked source for the coalescing statistics
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    // schedule the CPU-visible interrupt; ISR_NODELAY sources bypass
    // the configured interrupt delay
    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1254
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing.  just telling you in case you were thinking
   of expanding use.
*/
/**
 * Clear bits in the device's ISR.  Before clearing, any source that was
 * pending AND unmasked counts as having been delivered ("posted") for
 * the coalescing statistics.  Deasserts the CPU interrupt line if
 * nothing unmasked remains pending.
 *
 * @param interrupts bitmask of ISR_* bits to clear
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // statistics: each pending-and-unmasked source was delivered as
    // part of the post that the kernel is now acknowledging
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    if (regs.isr & regs.imr & ISR_IMPL)
        postedInterrupts++;

    // unimplemented bits are never actually set, so don't clear them
    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // deassert the CPU interrupt if nothing unmasked is left pending
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1304
1305 void
1306 NSGigE::devIntrChangeMask()
1307 {
1308 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1309 regs.isr, regs.imr, regs.isr & regs.imr);
1310
1311 if (regs.isr & regs.imr)
1312 cpuIntrPost(curTick);
1313 else
1314 cpuIntrClear();
1315 }
1316
void
NSGigE::cpuIntrPost(Tick when)
{
    // Schedule delivery of an interrupt to the CPU at tick 'when'.
    //
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    // intrTick == 0 means "nothing scheduled"; otherwise an earlier or
    // equal 'when' replaces the pending event below.
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp: if the requested tick somehow slipped into the
    // past, break into the debugger and deliver it now instead.
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Squash any previously scheduled (now superseded) interrupt event
    // and schedule a fresh one at the chosen tick.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1351
1352 void
1353 NSGigE::cpuInterrupt()
1354 {
1355 assert(intrTick == curTick);
1356
1357 // Whether or not there's a pending interrupt, we don't care about
1358 // it anymore
1359 intrEvent = 0;
1360 intrTick = 0;
1361
1362 // Don't send an interrupt if there's already one
1363 if (cpuPendingIntr) {
1364 DPRINTF(EthernetIntr,
1365 "would send an interrupt now, but there's already pending\n");
1366 } else {
1367 // Send interrupt
1368 cpuPendingIntr = true;
1369
1370 DPRINTF(EthernetIntr, "posting interrupt\n");
1371 intrPost();
1372 }
1373 }
1374
1375 void
1376 NSGigE::cpuIntrClear()
1377 {
1378 if (!cpuPendingIntr)
1379 return;
1380
1381 if (intrEvent) {
1382 intrEvent->squash();
1383 intrEvent = 0;
1384 }
1385
1386 intrTick = 0;
1387
1388 cpuPendingIntr = false;
1389
1390 DPRINTF(EthernetIntr, "clearing interrupt\n");
1391 intrClear();
1392 }
1393
1394 bool
1395 NSGigE::cpuIntrPending() const
1396 { return cpuPendingIntr; }
1397
1398 void
1399 NSGigE::txReset()
1400 {
1401
1402 DPRINTF(Ethernet, "transmit reset\n");
1403
1404 CTDD = false;
1405 txEnable = false;;
1406 txFragPtr = 0;
1407 assert(txDescCnt == 0);
1408 txFifo.clear();
1409 txState = txIdle;
1410 assert(txDmaState == dmaIdle);
1411 }
1412
1413 void
1414 NSGigE::rxReset()
1415 {
1416 DPRINTF(Ethernet, "receive reset\n");
1417
1418 CRDD = false;
1419 assert(rxPktBytes == 0);
1420 rxEnable = false;
1421 rxFragPtr = 0;
1422 assert(rxDescCnt == 0);
1423 assert(rxDmaState == dmaIdle);
1424 rxFifo.clear();
1425 rxState = rxIdle;
1426 }
1427
void
NSGigE::regsReset()
{
    // Zero the whole register file, then restore the registers that
    // have non-zero post-reset defaults, and disable all receive
    // filter modes.
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    // Receive-filter configuration mirrors of the RFCR register.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1450
1451 void
1452 NSGigE::rxDmaReadCopy()
1453 {
1454 assert(rxDmaState == dmaReading);
1455
1456 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1457 rxDmaState = dmaIdle;
1458
1459 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1460 rxDmaAddr, rxDmaLen);
1461 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1462 }
1463
1464 bool
1465 NSGigE::doRxDmaRead()
1466 {
1467 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1468 rxDmaState = dmaReading;
1469
1470 if (dmaInterface && !rxDmaFree) {
1471 if (dmaInterface->busy())
1472 rxDmaState = dmaReadWaiting;
1473 else
1474 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1475 &rxDmaReadEvent, true);
1476 return true;
1477 }
1478
1479 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1480 rxDmaReadCopy();
1481 return false;
1482 }
1483
1484 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1485 Tick start = curTick + dmaReadDelay + factor;
1486 rxDmaReadEvent.schedule(start);
1487 return true;
1488 }
1489
1490 void
1491 NSGigE::rxDmaReadDone()
1492 {
1493 assert(rxDmaState == dmaReading);
1494 rxDmaReadCopy();
1495
1496 // If the transmit state machine has a pending DMA, let it go first
1497 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1498 txKick();
1499
1500 rxKick();
1501 }
1502
1503 void
1504 NSGigE::rxDmaWriteCopy()
1505 {
1506 assert(rxDmaState == dmaWriting);
1507
1508 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1509 rxDmaState = dmaIdle;
1510
1511 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1512 rxDmaAddr, rxDmaLen);
1513 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1514 }
1515
1516 bool
1517 NSGigE::doRxDmaWrite()
1518 {
1519 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1520 rxDmaState = dmaWriting;
1521
1522 if (dmaInterface && !rxDmaFree) {
1523 if (dmaInterface->busy())
1524 rxDmaState = dmaWriteWaiting;
1525 else
1526 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1527 &rxDmaWriteEvent, true);
1528 return true;
1529 }
1530
1531 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1532 rxDmaWriteCopy();
1533 return false;
1534 }
1535
1536 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1537 Tick start = curTick + dmaWriteDelay + factor;
1538 rxDmaWriteEvent.schedule(start);
1539 return true;
1540 }
1541
1542 void
1543 NSGigE::rxDmaWriteDone()
1544 {
1545 assert(rxDmaState == dmaWriting);
1546 rxDmaWriteCopy();
1547
1548 // If the transmit state machine has a pending DMA, let it go first
1549 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1550 txKick();
1551
1552 rxKick();
1553 }
1554
void
NSGigE::rxKick()
{
    // Run the receive-side state machine until it has to block on a
    // DMA transfer, an empty FIFO, or the per-cycle clock limit.
    // Descriptor width (32- vs 64-bit) is selected by CFGR_M64ADDR.
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    Addr link, bufptr;
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry a DMA transfer that was previously blocked on a busy
    // DMA interface before making any state transition.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already consumed: refresh only its
            // link field to see whether the driver appended a new one.
            rxState = rxDescRefr;

            // NOTE(review): the 0x3fffffff mask appears to strip the
            // upper address bits of the descriptor pointer -- confirm
            // against the platform's physical address map.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the complete descriptor pointed at by RXDP.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-field refresh DMA to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        // Wait for the descriptor-fetch DMA to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        if (cmdsts & CMDSTS_OWN) {
            // Descriptor still owned by the driver: nothing to fill.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Still bytes to write into the descriptor's buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            // Hand the descriptor back to the driver with OK status
            // and the received length in the size field.
            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Receive-side checksum offload: verify IP/TCP/UDP sums
            // and flag errors in the extended status word.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write back only the cmdsts and extsts fields, which are
            // adjacent in both descriptor layouts.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // Wait for the data-write DMA to finish, then advance the
        // packet and buffer cursors by the amount transferred.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // Wait for the descriptor writeback DMA to finish.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (link == 0) {
            // End of the descriptor ring; remember that the current
            // descriptor was consumed (CRDD) so rxIdle refreshes it.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1891
void
NSGigE::transmit()
{
    // Try to send the packet at the head of the transmit FIFO out on
    // the link; on success, update stats, pop it, and post TXOK.
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // Whether or not the send succeeded, retry later while data
    // remains in the FIFO (e.g. the link rejected the packet).
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}
1942
1943 void
1944 NSGigE::txDmaReadCopy()
1945 {
1946 assert(txDmaState == dmaReading);
1947
1948 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1949 txDmaState = dmaIdle;
1950
1951 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1952 txDmaAddr, txDmaLen);
1953 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1954 }
1955
1956 bool
1957 NSGigE::doTxDmaRead()
1958 {
1959 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1960 txDmaState = dmaReading;
1961
1962 if (dmaInterface && !txDmaFree) {
1963 if (dmaInterface->busy())
1964 txDmaState = dmaReadWaiting;
1965 else
1966 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1967 &txDmaReadEvent, true);
1968 return true;
1969 }
1970
1971 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1972 txDmaReadCopy();
1973 return false;
1974 }
1975
1976 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1977 Tick start = curTick + dmaReadDelay + factor;
1978 txDmaReadEvent.schedule(start);
1979 return true;
1980 }
1981
1982 void
1983 NSGigE::txDmaReadDone()
1984 {
1985 assert(txDmaState == dmaReading);
1986 txDmaReadCopy();
1987
1988 // If the receive state machine has a pending DMA, let it go first
1989 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1990 rxKick();
1991
1992 txKick();
1993 }
1994
1995 void
1996 NSGigE::txDmaWriteCopy()
1997 {
1998 assert(txDmaState == dmaWriting);
1999
2000 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
2001 txDmaState = dmaIdle;
2002
2003 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
2004 txDmaAddr, txDmaLen);
2005 DDUMP(EthernetDMA, txDmaData, txDmaLen);
2006 }
2007
2008 bool
2009 NSGigE::doTxDmaWrite()
2010 {
2011 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
2012 txDmaState = dmaWriting;
2013
2014 if (dmaInterface && !txDmaFree) {
2015 if (dmaInterface->busy())
2016 txDmaState = dmaWriteWaiting;
2017 else
2018 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
2019 &txDmaWriteEvent, true);
2020 return true;
2021 }
2022
2023 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
2024 txDmaWriteCopy();
2025 return false;
2026 }
2027
2028 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
2029 Tick start = curTick + dmaWriteDelay + factor;
2030 txDmaWriteEvent.schedule(start);
2031 return true;
2032 }
2033
2034 void
2035 NSGigE::txDmaWriteDone()
2036 {
2037 assert(txDmaState == dmaWriting);
2038 txDmaWriteCopy();
2039
2040 // If the receive state machine has a pending DMA, let it go first
2041 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
2042 rxKick();
2043
2044 txKick();
2045 }
2046
2047 void
2048 NSGigE::txKick()
2049 {
2050 bool is64bit = (bool)(regs.config & CFGR_M64ADDR);
2051
2052 DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
2053 NsTxStateStrings[txState], is64bit ? 64 : 32);
2054
2055 Addr link, bufptr;
2056 uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
2057 uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;
2058
2059 next:
2060 if (clock) {
2061 if (txKickTick > curTick) {
2062 DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
2063 txKickTick);
2064 goto exit;
2065 }
2066
2067 // Go to the next state machine clock tick.
2068 txKickTick = curTick + cycles(1);
2069 }
2070
2071 switch(txDmaState) {
2072 case dmaReadWaiting:
2073 if (doTxDmaRead())
2074 goto exit;
2075 break;
2076 case dmaWriteWaiting:
2077 if (doTxDmaWrite())
2078 goto exit;
2079 break;
2080 default:
2081 break;
2082 }
2083
2084 link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
2085 bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
2086 switch (txState) {
2087 case txIdle:
2088 if (!txEnable) {
2089 DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
2090 goto exit;
2091 }
2092
2093 if (CTDD) {
2094 txState = txDescRefr;
2095
2096 txDmaAddr = regs.txdp & 0x3fffffff;
2097 txDmaData =
2098 is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
2099 txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
2100 txDmaFree = dmaDescFree;
2101
2102 descDmaReads++;
2103 descDmaRdBytes += txDmaLen;
2104
2105 if (doTxDmaRead())
2106 goto exit;
2107
2108 } else {
2109 txState = txDescRead;
2110
2111 txDmaAddr = regs.txdp & 0x3fffffff;
2112 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2113 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2114 txDmaFree = dmaDescFree;
2115
2116 descDmaReads++;
2117 descDmaRdBytes += txDmaLen;
2118
2119 if (doTxDmaRead())
2120 goto exit;
2121 }
2122 break;
2123
2124 case txDescRefr:
2125 if (txDmaState != dmaIdle)
2126 goto exit;
2127
2128 txState = txAdvance;
2129 break;
2130
2131 case txDescRead:
2132 if (txDmaState != dmaIdle)
2133 goto exit;
2134
2135 DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
2136 regs.txdp & 0x3fffffff);
2137 DPRINTF(EthernetDesc,
2138 "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
2139 link, bufptr, cmdsts, extsts);
2140
2141 if (cmdsts & CMDSTS_OWN) {
2142 txState = txFifoBlock;
2143 txFragPtr = bufptr;
2144 txDescCnt = cmdsts & CMDSTS_LEN_MASK;
2145 } else {
2146 devIntrPost(ISR_TXIDLE);
2147 txState = txIdle;
2148 goto exit;
2149 }
2150 break;
2151
2152 case txFifoBlock:
2153 if (!txPacket) {
2154 DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
2155 txPacket = new PacketData(16384);
2156 txPacketBufPtr = txPacket->data;
2157 }
2158
2159 if (txDescCnt == 0) {
2160 DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
2161 if (cmdsts & CMDSTS_MORE) {
2162 DPRINTF(EthernetSM, "there are more descriptors to come\n");
2163 txState = txDescWrite;
2164
2165 cmdsts &= ~CMDSTS_OWN;
2166
2167 txDmaAddr = regs.txdp & 0x3fffffff;
2168 txDmaData = &cmdsts;
2169 if (is64bit) {
2170 txDmaAddr += offsetof(ns_desc64, cmdsts);
2171 txDmaLen = sizeof(txDesc64.cmdsts);
2172 } else {
2173 txDmaAddr += offsetof(ns_desc32, cmdsts);
2174 txDmaLen = sizeof(txDesc32.cmdsts);
2175 }
2176 txDmaFree = dmaDescFree;
2177
2178 if (doTxDmaWrite())
2179 goto exit;
2180
2181 } else { /* this packet is totally done */
2182 DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
2183 /* deal with the the packet that just finished */
2184 if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
2185 IpPtr ip(txPacket);
2186 if (extsts & EXTSTS_UDPPKT) {
2187 UdpPtr udp(ip);
2188 udp->sum(0);
2189 udp->sum(cksum(udp));
2190 txUdpChecksums++;
2191 } else if (extsts & EXTSTS_TCPPKT) {
2192 TcpPtr tcp(ip);
2193 tcp->sum(0);
2194 tcp->sum(cksum(tcp));
2195 txTcpChecksums++;
2196 }
2197 if (extsts & EXTSTS_IPPKT) {
2198 ip->sum(0);
2199 ip->sum(cksum(ip));
2200 txIpChecksums++;
2201 }
2202 }
2203
2204 txPacket->length = txPacketBufPtr - txPacket->data;
2205 // this is just because the receive can't handle a
2206 // packet bigger want to make sure
2207 if (txPacket->length > 1514)
2208 panic("transmit packet too large, %s > 1514\n",
2209 txPacket->length);
2210
2211 #ifndef NDEBUG
2212 bool success =
2213 #endif
2214 txFifo.push(txPacket);
2215 assert(success);
2216
2217 /*
2218 * this following section is not tqo spec, but
2219 * functionally shouldn't be any different. normally,
2220 * the chip will wait til the transmit has occurred
2221 * before writing back the descriptor because it has
2222 * to wait to see that it was successfully transmitted
2223 * to decide whether to set CMDSTS_OK or not.
2224 * however, in the simulator since it is always
2225 * successfully transmitted, and writing it exactly to
2226 * spec would complicate the code, we just do it here
2227 */
2228
2229 cmdsts &= ~CMDSTS_OWN;
2230 cmdsts |= CMDSTS_OK;
2231
2232 DPRINTF(EthernetDesc,
2233 "txDesc writeback: cmdsts=%08x extsts=%08x\n",
2234 cmdsts, extsts);
2235
2236 txDmaFree = dmaDescFree;
2237 txDmaAddr = regs.txdp & 0x3fffffff;
2238 txDmaData = &cmdsts;
2239 if (is64bit) {
2240 txDmaAddr += offsetof(ns_desc64, cmdsts);
2241 txDmaLen =
2242 sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
2243 } else {
2244 txDmaAddr += offsetof(ns_desc32, cmdsts);
2245 txDmaLen =
2246 sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
2247 }
2248
2249 descDmaWrites++;
2250 descDmaWrBytes += txDmaLen;
2251
2252 transmit();
2253 txPacket = 0;
2254
2255 if (!txEnable) {
2256 DPRINTF(EthernetSM, "halting TX state machine\n");
2257 txState = txIdle;
2258 goto exit;
2259 } else
2260 txState = txAdvance;
2261
2262 if (doTxDmaWrite())
2263 goto exit;
2264 }
2265 } else {
2266 DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
2267 if (!txFifo.full()) {
2268 txState = txFragRead;
2269
2270 /*
2271 * The number of bytes transferred is either whatever
2272 * is left in the descriptor (txDescCnt), or if there
2273 * is not enough room in the fifo, just whatever room
2274 * is left in the fifo
2275 */
2276 txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());
2277
2278 txDmaAddr = txFragPtr & 0x3fffffff;
2279 txDmaData = txPacketBufPtr;
2280 txDmaLen = txXferLen;
2281 txDmaFree = dmaDataFree;
2282
2283 if (doTxDmaRead())
2284 goto exit;
2285 } else {
2286 txState = txFifoBlock;
2287 transmit();
2288
2289 goto exit;
2290 }
2291
2292 }
2293 break;
2294
2295 case txFragRead:
2296 if (txDmaState != dmaIdle)
2297 goto exit;
2298
2299 txPacketBufPtr += txXferLen;
2300 txFragPtr += txXferLen;
2301 txDescCnt -= txXferLen;
2302 txFifo.reserve(txXferLen);
2303
2304 txState = txFifoBlock;
2305 break;
2306
2307 case txDescWrite:
2308 if (txDmaState != dmaIdle)
2309 goto exit;
2310
2311 if (cmdsts & CMDSTS_INTR)
2312 devIntrPost(ISR_TXDESC);
2313
2314 if (!txEnable) {
2315 DPRINTF(EthernetSM, "halting TX state machine\n");
2316 txState = txIdle;
2317 goto exit;
2318 } else
2319 txState = txAdvance;
2320 break;
2321
2322 case txAdvance:
2323 if (link == 0) {
2324 devIntrPost(ISR_TXIDLE);
2325 txState = txIdle;
2326 goto exit;
2327 } else {
2328 if (txDmaState != dmaIdle)
2329 goto exit;
2330 txState = txDescRead;
2331 regs.txdp = link;
2332 CTDD = false;
2333
2334 txDmaAddr = link & 0x3fffffff;
2335 txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
2336 txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
2337 txDmaFree = dmaDescFree;
2338
2339 if (doTxDmaRead())
2340 goto exit;
2341 }
2342 break;
2343
2344 default:
2345 panic("invalid state");
2346 }
2347
2348 DPRINTF(EthernetSM, "entering next txState=%s\n",
2349 NsTxStateStrings[txState]);
2350 goto next;
2351
2352 exit:
2353 /**
2354 * @todo do we want to schedule a future kick?
2355 */
2356 DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
2357 NsTxStateStrings[txState]);
2358
2359 if (clock && !txKickEvent.scheduled())
2360 txKickEvent.schedule(txKickTick);
2361 }
2362
/**
 * Advance the EEPROM state machine
 * Called on rising edge of EEPROM clock bit in MEAR
 *
 * The EEPROM is a bit-serial device: the opcode and address are
 * shifted in one bit per clock via MEAR_EEDI, and the 16-bit data
 * word is shifted out one bit per clock via MEAR_EEDO.
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift in the next opcode bit (MSB first) from EEDI.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in the next address bit (MSB first) from EEDI.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the three perfect-match (station address) words are
            // backed by data; each EEPROM word holds two MAC bytes.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit (MSB shifted out first)
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2463
2464 void
2465 NSGigE::transferDone()
2466 {
2467 if (txFifo.empty()) {
2468 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2469 return;
2470 }
2471
2472 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2473
2474 if (txEvent.scheduled())
2475 txEvent.reschedule(curTick + cycles(1));
2476 else
2477 txEvent.schedule(curTick + cycles(1));
2478 }
2479
2480 bool
2481 NSGigE::rxFilter(const PacketPtr &packet)
2482 {
2483 EthPtr eth = packet;
2484 bool drop = true;
2485 string type;
2486
2487 const EthAddr &dst = eth->dst();
2488 if (dst.unicast()) {
2489 // If we're accepting all unicast addresses
2490 if (acceptUnicast)
2491 drop = false;
2492
2493 // If we make a perfect match
2494 if (acceptPerfect && dst == rom.perfectMatch)
2495 drop = false;
2496
2497 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2498 drop = false;
2499
2500 } else if (dst.broadcast()) {
2501 // if we're accepting broadcasts
2502 if (acceptBroadcast)
2503 drop = false;
2504
2505 } else if (dst.multicast()) {
2506 // if we're accepting all multicasts
2507 if (acceptMulticast)
2508 drop = false;
2509
2510 // Multicast hashing faked - all packets accepted
2511 if (multicastHashEnable)
2512 drop = false;
2513 }
2514
2515 if (drop) {
2516 DPRINTF(Ethernet, "rxFilter drop\n");
2517 DDUMP(EthernetData, packet->data, packet->length);
2518 }
2519
2520 return drop;
2521 }
2522
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // Accept a packet arriving from the link.  Returns true when the
    // packet was consumed (even if it was deliberately dropped) and
    // false only when the receive FIFO overflows, so the link retries.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        interface->recvDone();
        return true;
    }

    // NOTE(review): packets are dropped when filtering is *disabled*,
    // which looks inverted relative to real hardware (filter off would
    // normally mean accept everything) -- confirm intent.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFifo.avail() < packet->length) {
        // FIFO overrun: count the drop, raise RXORN, and report failure
        // so the link layer knows the packet was not consumed.
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    // Wake the receive state machine to start consuming the new data.
    rxKick();
    return true;
}
2575
//=====================================================================
//
// Serialization / checkpointing support
//
/**
 * Write the complete device state to a checkpoint stream.
 *
 * The order of the SERIALIZE_* calls below is the on-disk format: it
 * must remain in lockstep with NSGigE::unserialize(), which reads the
 * values back in exactly the same order.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Completing in-flight DMA copies here means we never have to
    // checkpoint a half-performed DMA transaction.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // Receive-filter ROM: perfect-match station address plus hash table
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // The in-flight packets are optional; an "exists" flag is written
    // first so unserialize knows whether a packet body follows.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // NOTE(review): this mutates txPacket->length as a side effect
        // of serialization -- presumably harmless because the length is
        // derived from the buffer cursor anyway; verify.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        // Store the buffer cursor as an offset; the raw pointer is
        // meaningless across a checkpoint/restore.
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum states are widened to plain ints for the checkpoint format;
    // unserialize casts them back.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick (0 means "no pending transmit").
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    // Unlike transmitTick, the interrupt event time is stored as an
    // absolute tick (0 means no pending interrupt event).
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2757
/**
 * Restore device state from a checkpoint.
 *
 * Reads values in exactly the order NSGigE::serialize() wrote them, then
 * reschedules any events (kicks, transmit, interrupt) that were pending
 * when the checkpoint was taken.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    // Device registers, in the same order serialize() wrote them
    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    // Receive-filter ROM contents
    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // Allocate a fresh buffer (16384 bytes, same size the device
        // uses for in-flight packets) and rebuild the cursor pointer
        // from the checkpointed offset.
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    // NOTE(review): this assignment is redundant with the else branch
    // below; harmless, but one of the two could be removed.
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // States were checkpointed as plain ints; cast back to the enums.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A nonzero kick tick means the state machine was active; restart it.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was stored relative to the checkpoint's curTick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    // intrEventTick is an absolute tick; nonzero means an interrupt
    // event was pending and must be recreated.
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    // The PIO interface's address ranges are not checkpointed, so they
    // must be re-registered from the PCI BARs after a restore.
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2946
2947 Tick
2948 NSGigE::cacheAccess(MemReqPtr &req)
2949 {
2950 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2951 req->paddr, req->paddr - addr);
2952 return curTick + pioLatency;
2953 }
2954
// Configuration parameters accepted by an NSGigEInt (the device's
// ethernet link interface): the peer interface it is wired to and the
// NSGigE device that owns it.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

// Parameter descriptions and defaults; the peer may be left unconnected.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2968
2969 CREATE_SIM_OBJECT(NSGigEInt)
2970 {
2971 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2972
2973 EtherInt *p = (EtherInt *)peer;
2974 if (p) {
2975 dev_int->setPeer(p);
2976 p->setPeer(dev_int);
2977 }
2978
2979 return dev_int;
2980 }
2981
2982 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2983
2984
// Configuration parameters accepted by the NSGigE device itself.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // State-machine clock
    Param<Tick> clock;

    // PCI device plumbing
    Param<Addr> addr;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // Memory-system attachment and DMA/PIO timing knobs
    SimObjectParam<HierParams *> hier;
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> pio_latency;
    Param<Tick> intr_delay;

    // Ethernet-side delays and fifo sizing
    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> tx_fifo_size;

    // Receive filtering and addressing
    Param<bool> rx_filter;
    Param<string> hardware_address;
    Param<bool> dedicated;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

// Human-readable descriptions for each parameter above (same order).
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(clock, "State machine processor frequency"),

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),

    INIT_PARAM(hier, "Hierarchy global variables"),
    INIT_PARAM(io_bus, "The IO Bus to attach to for headers"),
    INIT_PARAM(payload_bus, "The IO Bus to attach to for payload"),
    INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
    INIT_PARAM(dma_data_free, "DMA of Data is free"),
    INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
    INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
    INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
    INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
    INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
    INIT_PARAM(pio_latency, "Programmed IO latency in bus cycles"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),

    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
    INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),

    INIT_PARAM(rx_filter, "Enable Receive Filter"),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM(dedicated, "dedicate a kernel thread to the driver")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
3060
3061
3062 CREATE_SIM_OBJECT(NSGigE)
3063 {
3064 NSGigE::Params *params = new NSGigE::Params;
3065
3066 params->name = getInstanceName();
3067
3068 params->clock = clock;
3069
3070 params->mmu = mmu;
3071 params->pmem = physmem;
3072 params->configSpace = configspace;
3073 params->configData = configdata;
3074 params->plat = platform;
3075 params->busNum = pci_bus;
3076 params->deviceNum = pci_dev;
3077 params->functionNum = pci_func;
3078
3079 params->hier = hier;
3080 params->header_bus = io_bus;
3081 params->payload_bus = payload_bus;
3082 params->dma_desc_free = dma_desc_free;
3083 params->dma_data_free = dma_data_free;
3084 params->dma_read_delay = dma_read_delay;
3085 params->dma_write_delay = dma_write_delay;
3086 params->dma_read_factor = dma_read_factor;
3087 params->dma_write_factor = dma_write_factor;
3088 params->dma_no_allocate = dma_no_allocate;
3089 params->pio_latency = pio_latency;
3090 params->intr_delay = intr_delay;
3091
3092 params->rx_delay = rx_delay;
3093 params->tx_delay = tx_delay;
3094 params->rx_fifo_size = rx_fifo_size;
3095 params->tx_fifo_size = tx_fifo_size;
3096
3097 params->rx_filter = rx_filter;
3098 params->eaddr = hardware_address;
3099 params->dedicated = dedicated;
3100
3101 return new NSGigE(params);
3102 }
3103
3104 REGISTER_SIM_OBJECT("NSGigE", NSGigE)