Added ev5.hh to files which should include it directly, now that it isn't included...
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "arch/alpha/ev5.hh"
38 #include "base/inet.hh"
39 #include "cpu/exec_context.hh"
40 #include "dev/etherlink.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/pciconfigall.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional/memory_control.hh"
48 #include "mem/functional/physical.hh"
49 #include "sim/builder.hh"
50 #include "sim/debug.hh"
51 #include "sim/host.hh"
52 #include "sim/stats.hh"
53 #include "arch/vtophys.hh"
54
// Printable names for the receive state machine states; indexed by the
// RxState enum values (used for DPRINTF tracing and serialization).
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
65
// Printable names for the transmit state machine states; indexed by the
// TxState enum values (used for DPRINTF tracing and serialization).
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
76
// Printable names for the DMA engine states; indexed by the DmaState
// enum values (used for DPRINTF tracing and serialization).
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
85
86 using namespace std;
87 using namespace Net;
88 using namespace TheISA;
89
90 ///////////////////////////////////////////////////////////////////////
91 //
92 // NSGigE PCI Device
93 //
/**
 * Construct the device model from its simulation parameters: set up
 * the optional PIO and DMA bus interfaces, record the DMA/interrupt
 * timing parameters, reset the register file, load the MAC address
 * into the perfect-match ROM, and zero the cached descriptors.
 *
 * NOTE: initializer order must match the member declaration order in
 * ns_gige.hh.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // Programmed I/O interface is optional; only created when a PIO bus
    // was configured.
    if (p->pio_bus) {
        pioInterface = newPioInterface(name() + ".pio", p->hier,
                                       p->pio_bus, this,
                                       &NSGigE::cacheAccess);
        pioLatency = p->pio_latency * p->pio_bus->clockRate;
    }

    // DMA interface: with separate header/payload buses use both;
    // with only a header bus, route payload over it as well.
    if (p->header_bus) {
        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus)
        panic("Must define a header bus if defining a payload bus");

    // Timing knobs for interrupt posting and DMA latency modeling.
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    regsReset();
    // Seed the receive filter's perfect-match ROM with the configured
    // MAC address.
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);

    // Clear the cached 32- and 64-bit descriptor images.
    memset(&rxDesc32, 0, sizeof(rxDesc32));
    memset(&txDesc32, 0, sizeof(txDesc32));
    memset(&rxDesc64, 0, sizeof(rxDesc64));
    memset(&txDesc64, 0, sizeof(txDesc64));
}
149
// Nothing to release explicitly; members clean themselves up.
NSGigE::~NSGigE()
{}
152
153 void
154 NSGigE::regStats()
155 {
156 txBytes
157 .name(name() + ".txBytes")
158 .desc("Bytes Transmitted")
159 .prereq(txBytes)
160 ;
161
162 rxBytes
163 .name(name() + ".rxBytes")
164 .desc("Bytes Received")
165 .prereq(rxBytes)
166 ;
167
168 txPackets
169 .name(name() + ".txPackets")
170 .desc("Number of Packets Transmitted")
171 .prereq(txBytes)
172 ;
173
174 rxPackets
175 .name(name() + ".rxPackets")
176 .desc("Number of Packets Received")
177 .prereq(rxBytes)
178 ;
179
180 txIpChecksums
181 .name(name() + ".txIpChecksums")
182 .desc("Number of tx IP Checksums done by device")
183 .precision(0)
184 .prereq(txBytes)
185 ;
186
187 rxIpChecksums
188 .name(name() + ".rxIpChecksums")
189 .desc("Number of rx IP Checksums done by device")
190 .precision(0)
191 .prereq(rxBytes)
192 ;
193
194 txTcpChecksums
195 .name(name() + ".txTcpChecksums")
196 .desc("Number of tx TCP Checksums done by device")
197 .precision(0)
198 .prereq(txBytes)
199 ;
200
201 rxTcpChecksums
202 .name(name() + ".rxTcpChecksums")
203 .desc("Number of rx TCP Checksums done by device")
204 .precision(0)
205 .prereq(rxBytes)
206 ;
207
208 txUdpChecksums
209 .name(name() + ".txUdpChecksums")
210 .desc("Number of tx UDP Checksums done by device")
211 .precision(0)
212 .prereq(txBytes)
213 ;
214
215 rxUdpChecksums
216 .name(name() + ".rxUdpChecksums")
217 .desc("Number of rx UDP Checksums done by device")
218 .precision(0)
219 .prereq(rxBytes)
220 ;
221
222 descDmaReads
223 .name(name() + ".descDMAReads")
224 .desc("Number of descriptors the device read w/ DMA")
225 .precision(0)
226 ;
227
228 descDmaWrites
229 .name(name() + ".descDMAWrites")
230 .desc("Number of descriptors the device wrote w/ DMA")
231 .precision(0)
232 ;
233
234 descDmaRdBytes
235 .name(name() + ".descDmaReadBytes")
236 .desc("number of descriptor bytes read w/ DMA")
237 .precision(0)
238 ;
239
240 descDmaWrBytes
241 .name(name() + ".descDmaWriteBytes")
242 .desc("number of descriptor bytes write w/ DMA")
243 .precision(0)
244 ;
245
246 txBandwidth
247 .name(name() + ".txBandwidth")
248 .desc("Transmit Bandwidth (bits/s)")
249 .precision(0)
250 .prereq(txBytes)
251 ;
252
253 rxBandwidth
254 .name(name() + ".rxBandwidth")
255 .desc("Receive Bandwidth (bits/s)")
256 .precision(0)
257 .prereq(rxBytes)
258 ;
259
260 totBandwidth
261 .name(name() + ".totBandwidth")
262 .desc("Total Bandwidth (bits/s)")
263 .precision(0)
264 .prereq(totBytes)
265 ;
266
267 totPackets
268 .name(name() + ".totPackets")
269 .desc("Total Packets")
270 .precision(0)
271 .prereq(totBytes)
272 ;
273
274 totBytes
275 .name(name() + ".totBytes")
276 .desc("Total Bytes")
277 .precision(0)
278 .prereq(totBytes)
279 ;
280
281 totPacketRate
282 .name(name() + ".totPPS")
283 .desc("Total Tranmission Rate (packets/s)")
284 .precision(0)
285 .prereq(totBytes)
286 ;
287
288 txPacketRate
289 .name(name() + ".txPPS")
290 .desc("Packet Tranmission Rate (packets/s)")
291 .precision(0)
292 .prereq(txBytes)
293 ;
294
295 rxPacketRate
296 .name(name() + ".rxPPS")
297 .desc("Packet Reception Rate (packets/s)")
298 .precision(0)
299 .prereq(rxBytes)
300 ;
301
302 postedSwi
303 .name(name() + ".postedSwi")
304 .desc("number of software interrupts posted to CPU")
305 .precision(0)
306 ;
307
308 totalSwi
309 .name(name() + ".totalSwi")
310 .desc("total number of Swi written to ISR")
311 .precision(0)
312 ;
313
314 coalescedSwi
315 .name(name() + ".coalescedSwi")
316 .desc("average number of Swi's coalesced into each post")
317 .precision(0)
318 ;
319
320 postedRxIdle
321 .name(name() + ".postedRxIdle")
322 .desc("number of rxIdle interrupts posted to CPU")
323 .precision(0)
324 ;
325
326 totalRxIdle
327 .name(name() + ".totalRxIdle")
328 .desc("total number of RxIdle written to ISR")
329 .precision(0)
330 ;
331
332 coalescedRxIdle
333 .name(name() + ".coalescedRxIdle")
334 .desc("average number of RxIdle's coalesced into each post")
335 .precision(0)
336 ;
337
338 postedRxOk
339 .name(name() + ".postedRxOk")
340 .desc("number of RxOk interrupts posted to CPU")
341 .precision(0)
342 ;
343
344 totalRxOk
345 .name(name() + ".totalRxOk")
346 .desc("total number of RxOk written to ISR")
347 .precision(0)
348 ;
349
350 coalescedRxOk
351 .name(name() + ".coalescedRxOk")
352 .desc("average number of RxOk's coalesced into each post")
353 .precision(0)
354 ;
355
356 postedRxDesc
357 .name(name() + ".postedRxDesc")
358 .desc("number of RxDesc interrupts posted to CPU")
359 .precision(0)
360 ;
361
362 totalRxDesc
363 .name(name() + ".totalRxDesc")
364 .desc("total number of RxDesc written to ISR")
365 .precision(0)
366 ;
367
368 coalescedRxDesc
369 .name(name() + ".coalescedRxDesc")
370 .desc("average number of RxDesc's coalesced into each post")
371 .precision(0)
372 ;
373
374 postedTxOk
375 .name(name() + ".postedTxOk")
376 .desc("number of TxOk interrupts posted to CPU")
377 .precision(0)
378 ;
379
380 totalTxOk
381 .name(name() + ".totalTxOk")
382 .desc("total number of TxOk written to ISR")
383 .precision(0)
384 ;
385
386 coalescedTxOk
387 .name(name() + ".coalescedTxOk")
388 .desc("average number of TxOk's coalesced into each post")
389 .precision(0)
390 ;
391
392 postedTxIdle
393 .name(name() + ".postedTxIdle")
394 .desc("number of TxIdle interrupts posted to CPU")
395 .precision(0)
396 ;
397
398 totalTxIdle
399 .name(name() + ".totalTxIdle")
400 .desc("total number of TxIdle written to ISR")
401 .precision(0)
402 ;
403
404 coalescedTxIdle
405 .name(name() + ".coalescedTxIdle")
406 .desc("average number of TxIdle's coalesced into each post")
407 .precision(0)
408 ;
409
410 postedTxDesc
411 .name(name() + ".postedTxDesc")
412 .desc("number of TxDesc interrupts posted to CPU")
413 .precision(0)
414 ;
415
416 totalTxDesc
417 .name(name() + ".totalTxDesc")
418 .desc("total number of TxDesc written to ISR")
419 .precision(0)
420 ;
421
422 coalescedTxDesc
423 .name(name() + ".coalescedTxDesc")
424 .desc("average number of TxDesc's coalesced into each post")
425 .precision(0)
426 ;
427
428 postedRxOrn
429 .name(name() + ".postedRxOrn")
430 .desc("number of RxOrn posted to CPU")
431 .precision(0)
432 ;
433
434 totalRxOrn
435 .name(name() + ".totalRxOrn")
436 .desc("total number of RxOrn written to ISR")
437 .precision(0)
438 ;
439
440 coalescedRxOrn
441 .name(name() + ".coalescedRxOrn")
442 .desc("average number of RxOrn's coalesced into each post")
443 .precision(0)
444 ;
445
446 coalescedTotal
447 .name(name() + ".coalescedTotal")
448 .desc("average number of interrupts coalesced into each post")
449 .precision(0)
450 ;
451
452 postedInterrupts
453 .name(name() + ".postedInterrupts")
454 .desc("number of posts to CPU")
455 .precision(0)
456 ;
457
458 droppedPackets
459 .name(name() + ".droppedPackets")
460 .desc("number of packets dropped")
461 .precision(0)
462 ;
463
464 coalescedSwi = totalSwi / postedInterrupts;
465 coalescedRxIdle = totalRxIdle / postedInterrupts;
466 coalescedRxOk = totalRxOk / postedInterrupts;
467 coalescedRxDesc = totalRxDesc / postedInterrupts;
468 coalescedTxOk = totalTxOk / postedInterrupts;
469 coalescedTxIdle = totalTxIdle / postedInterrupts;
470 coalescedTxDesc = totalTxDesc / postedInterrupts;
471 coalescedRxOrn = totalRxOrn / postedInterrupts;
472
473 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
474 totalTxOk + totalTxIdle + totalTxDesc +
475 totalRxOrn) / postedInterrupts;
476
477 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
478 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
479 totBandwidth = txBandwidth + rxBandwidth;
480 totBytes = txBytes + rxBytes;
481 totPackets = txPackets + rxPackets;
482
483 txPacketRate = txPackets / simSeconds;
484 rxPacketRate = rxPackets / simSeconds;
485 }
486
487 /**
488 * This is to read the PCI general configuration registers
489 */
490 void
491 NSGigE::readConfig(int offset, int size, uint8_t *data)
492 {
493 if (offset < PCI_DEVICE_SPECIFIC)
494 PciDev::readConfig(offset, size, data);
495 else
496 panic("Device specific PCI config space not implemented!\n");
497 }
498
/**
 * This is to write to the PCI general configuration registers.
 * Delegates the actual write to PciDev, then snoops the offset to keep
 * device state (I/O enable, BAR address ranges) in sync.
 */
void
NSGigE::writeConfig(int offset, int size, const uint8_t* data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track the I/O Space Enable bit; read()/write() assert on it.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        // Register the new BAR0 range with the PIO interface and strip
        // the EV5 uncached-space bits from the stored address.
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        // Same handling for BAR1.
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
556
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * Decodes the low 12 bits of the physical address into a register
 * offset; only 32-bit accesses to the register file are supported.
 * Reads of CR and MIBC have clear-on-read side effects, and reading
 * ISR acknowledges (clears) all implemented interrupts.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // This window aliases the PCI configuration space.
        readConfig(daddr & 0xff, req->size, data);
        return NoFault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return NoFault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;
            uint16_t rfaddr;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // Reading ISR acknowledges all implemented interrupts.
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR's address field selects which filter data word
                // (perfect-match octets or hash table) RFDR exposes.
                rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
                switch (rfaddr) {
                  // Read from perfect match ROM octets
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    // Read filter hash table
                    if (rfaddr >= FHASH_ADDR &&
                        rfaddr < FHASH_ADDR + FHASH_SIZE) {

                        // Only word-aligned reads supported
                        if (rfaddr % 2)
                            panic("unaligned read from filter hash table!");

                        reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                        reg += rom.filterHash[rfaddr - FHASH_ADDR];
                        break;
                    }

                    panic("reading RFDR for something other than pattern"
                          " matching or hashing! %#x\n", rfaddr);
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // These status/clear bits read back as zero.
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // Simulator-only register advertising model features to
                // the (modified) guest driver.
                reg = 0;
                if (params()->rx_thread)
                    reg |= M5REG_RX_THREAD;
                if (params()->tx_thread)
                    reg |= M5REG_TX_THREAD;
                if (params()->rss)
                    reg |= M5REG_RSS;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return NoFault;
}
791
792 Fault
793 NSGigE::write(MemReqPtr &req, const uint8_t *data)
794 {
795 assert(ioEnable);
796
797 Addr daddr = req->paddr & 0xfff;
798 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
799 daddr, req->paddr, req->vaddr, req->size);
800
801 if (daddr > LAST && daddr <= RESERVED) {
802 panic("Accessing reserved register");
803 } else if (daddr > RESERVED && daddr <= 0x3FC) {
804 writeConfig(daddr & 0xff, req->size, data);
805 return NoFault;
806 } else if (daddr > 0x3FC)
807 panic("Something is messed up!\n");
808
809 if (req->size == sizeof(uint32_t)) {
810 uint32_t reg = *(uint32_t *)data;
811 uint16_t rfaddr;
812
813 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
814
815 switch (daddr) {
816 case CR:
817 regs.command = reg;
818 if (reg & CR_TXD) {
819 txEnable = false;
820 } else if (reg & CR_TXE) {
821 txEnable = true;
822
823 // the kernel is enabling the transmit machine
824 if (txState == txIdle)
825 txKick();
826 }
827
828 if (reg & CR_RXD) {
829 rxEnable = false;
830 } else if (reg & CR_RXE) {
831 rxEnable = true;
832
833 if (rxState == rxIdle)
834 rxKick();
835 }
836
837 if (reg & CR_TXR)
838 txReset();
839
840 if (reg & CR_RXR)
841 rxReset();
842
843 if (reg & CR_SWI)
844 devIntrPost(ISR_SWI);
845
846 if (reg & CR_RST) {
847 txReset();
848 rxReset();
849
850 regsReset();
851 }
852 break;
853
854 case CFGR:
855 if (reg & CFGR_LNKSTS ||
856 reg & CFGR_SPDSTS ||
857 reg & CFGR_DUPSTS ||
858 reg & CFGR_RESERVED ||
859 reg & CFGR_T64ADDR ||
860 reg & CFGR_PCI64_DET)
861
862 // First clear all writable bits
863 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
864 CFGR_RESERVED | CFGR_T64ADDR |
865 CFGR_PCI64_DET;
866 // Now set the appropriate writable bits
867 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
868 CFGR_RESERVED | CFGR_T64ADDR |
869 CFGR_PCI64_DET);
870
871 // all these #if 0's are because i don't THINK the kernel needs to
872 // have these implemented. if there is a problem relating to one of
873 // these, you may need to add functionality in.
874 if (reg & CFGR_TBI_EN) ;
875 if (reg & CFGR_MODE_1000) ;
876
877 if (reg & CFGR_AUTO_1000)
878 panic("CFGR_AUTO_1000 not implemented!\n");
879
880 if (reg & CFGR_PINT_DUPSTS ||
881 reg & CFGR_PINT_LNKSTS ||
882 reg & CFGR_PINT_SPDSTS)
883 ;
884
885 if (reg & CFGR_TMRTEST) ;
886 if (reg & CFGR_MRM_DIS) ;
887 if (reg & CFGR_MWI_DIS) ;
888
889 if (reg & CFGR_T64ADDR) ;
890 // panic("CFGR_T64ADDR is read only register!\n");
891
892 if (reg & CFGR_PCI64_DET)
893 panic("CFGR_PCI64_DET is read only register!\n");
894
895 if (reg & CFGR_DATA64_EN) ;
896 if (reg & CFGR_M64ADDR) ;
897 if (reg & CFGR_PHY_RST) ;
898 if (reg & CFGR_PHY_DIS) ;
899
900 if (reg & CFGR_EXTSTS_EN)
901 extstsEnable = true;
902 else
903 extstsEnable = false;
904
905 if (reg & CFGR_REQALG) ;
906 if (reg & CFGR_SB) ;
907 if (reg & CFGR_POW) ;
908 if (reg & CFGR_EXD) ;
909 if (reg & CFGR_PESEL) ;
910 if (reg & CFGR_BROM_DIS) ;
911 if (reg & CFGR_EXT_125) ;
912 if (reg & CFGR_BEM) ;
913 break;
914
915 case MEAR:
916 // Clear writable bits
917 regs.mear &= MEAR_EEDO;
918 // Set appropriate writable bits
919 regs.mear |= reg & ~MEAR_EEDO;
920
921 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
922 // even though it could get it through RFDR
923 if (reg & MEAR_EESEL) {
924 // Rising edge of clock
925 if (reg & MEAR_EECLK && !eepromClk)
926 eepromKick();
927 }
928 else {
929 eepromState = eepromStart;
930 regs.mear &= ~MEAR_EEDI;
931 }
932
933 eepromClk = reg & MEAR_EECLK;
934
935 // since phy is completely faked, MEAR_MD* don't matter
936 if (reg & MEAR_MDIO) ;
937 if (reg & MEAR_MDDIR) ;
938 if (reg & MEAR_MDC) ;
939 break;
940
941 case PTSCR:
942 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
943 // these control BISTs for various parts of chip - we
944 // don't care or do just fake that the BIST is done
945 if (reg & PTSCR_RBIST_EN)
946 regs.ptscr |= PTSCR_RBIST_DONE;
947 if (reg & PTSCR_EEBIST_EN)
948 regs.ptscr &= ~PTSCR_EEBIST_EN;
949 if (reg & PTSCR_EELOAD_EN)
950 regs.ptscr &= ~PTSCR_EELOAD_EN;
951 break;
952
953 case ISR: /* writing to the ISR has no effect */
954 panic("ISR is a read only register!\n");
955
956 case IMR:
957 regs.imr = reg;
958 devIntrChangeMask();
959 break;
960
961 case IER:
962 regs.ier = reg;
963 break;
964
965 case IHR:
966 regs.ihr = reg;
967 /* not going to implement real interrupt holdoff */
968 break;
969
970 case TXDP:
971 regs.txdp = (reg & 0xFFFFFFFC);
972 assert(txState == txIdle);
973 CTDD = false;
974 break;
975
976 case TXDP_HI:
977 regs.txdp_hi = reg;
978 break;
979
980 case TX_CFG:
981 regs.txcfg = reg;
982 #if 0
983 if (reg & TX_CFG_CSI) ;
984 if (reg & TX_CFG_HBI) ;
985 if (reg & TX_CFG_MLB) ;
986 if (reg & TX_CFG_ATP) ;
987 if (reg & TX_CFG_ECRETRY) {
988 /*
989 * this could easily be implemented, but considering
990 * the network is just a fake pipe, wouldn't make
991 * sense to do this
992 */
993 }
994
995 if (reg & TX_CFG_BRST_DIS) ;
996 #endif
997
998 #if 0
999 /* we handle our own DMA, ignore the kernel's exhortations */
1000 if (reg & TX_CFG_MXDMA) ;
1001 #endif
1002
1003 // also, we currently don't care about fill/drain
1004 // thresholds though this may change in the future with
1005 // more realistic networks or a driver which changes it
1006 // according to feedback
1007
1008 break;
1009
1010 case GPIOR:
1011 // Only write writable bits
1012 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1013 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
1014 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1015 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
1016 /* these just control general purpose i/o pins, don't matter */
1017 break;
1018
1019 case RXDP:
1020 regs.rxdp = reg;
1021 CRDD = false;
1022 break;
1023
1024 case RXDP_HI:
1025 regs.rxdp_hi = reg;
1026 break;
1027
1028 case RX_CFG:
1029 regs.rxcfg = reg;
1030 #if 0
1031 if (reg & RX_CFG_AEP) ;
1032 if (reg & RX_CFG_ARP) ;
1033 if (reg & RX_CFG_STRIPCRC) ;
1034 if (reg & RX_CFG_RX_RD) ;
1035 if (reg & RX_CFG_ALP) ;
1036 if (reg & RX_CFG_AIRL) ;
1037
1038 /* we handle our own DMA, ignore what kernel says about it */
1039 if (reg & RX_CFG_MXDMA) ;
1040
1041 //also, we currently don't care about fill/drain thresholds
1042 //though this may change in the future with more realistic
1043 //networks or a driver which changes it according to feedback
1044 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1045 #endif
1046 break;
1047
1048 case PQCR:
1049 /* there is no priority queueing used in the linux 2.6 driver */
1050 regs.pqcr = reg;
1051 break;
1052
1053 case WCSR:
1054 /* not going to implement wake on LAN */
1055 regs.wcsr = reg;
1056 break;
1057
1058 case PCR:
1059 /* not going to implement pause control */
1060 regs.pcr = reg;
1061 break;
1062
1063 case RFCR:
1064 regs.rfcr = reg;
1065
1066 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1067 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1068 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1069 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1070 acceptPerfect = (reg & RFCR_APM) ? true : false;
1071 acceptArp = (reg & RFCR_AARP) ? true : false;
1072 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1073
1074 #if 0
1075 if (reg & RFCR_APAT)
1076 panic("RFCR_APAT not implemented!\n");
1077 #endif
1078 if (reg & RFCR_UHEN)
1079 panic("Unicast hash filtering not used by drivers!\n");
1080
1081 if (reg & RFCR_ULM)
1082 panic("RFCR_ULM not implemented!\n");
1083
1084 break;
1085
1086 case RFDR:
1087 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1088 switch (rfaddr) {
1089 case 0x000:
1090 rom.perfectMatch[0] = (uint8_t)reg;
1091 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1092 break;
1093 case 0x002:
1094 rom.perfectMatch[2] = (uint8_t)reg;
1095 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1096 break;
1097 case 0x004:
1098 rom.perfectMatch[4] = (uint8_t)reg;
1099 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1100 break;
1101 default:
1102
1103 if (rfaddr >= FHASH_ADDR &&
1104 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1105
1106 // Only word-aligned writes supported
1107 if (rfaddr % 2)
1108 panic("unaligned write to filter hash table!");
1109
1110 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1111 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1112 = (uint8_t)(reg >> 8);
1113 break;
1114 }
1115 panic("writing RFDR for something other than pattern matching\
1116 or hashing! %#x\n", rfaddr);
1117 }
1118
1119 case BRAR:
1120 regs.brar = reg;
1121 break;
1122
1123 case BRDR:
1124 panic("the driver never uses BRDR, something is wrong!\n");
1125
1126 case SRR:
1127 panic("SRR is read only register!\n");
1128
1129 case MIBC:
1130 panic("the driver never uses MIBC, something is wrong!\n");
1131
1132 case VRCR:
1133 regs.vrcr = reg;
1134 break;
1135
1136 case VTCR:
1137 regs.vtcr = reg;
1138 break;
1139
1140 case VDR:
1141 panic("the driver never uses VDR, something is wrong!\n");
1142
1143 case CCSR:
1144 /* not going to implement clockrun stuff */
1145 regs.ccsr = reg;
1146 break;
1147
1148 case TBICR:
1149 regs.tbicr = reg;
1150 if (reg & TBICR_MR_LOOPBACK)
1151 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1152
1153 if (reg & TBICR_MR_AN_ENABLE) {
1154 regs.tanlpar = regs.tanar;
1155 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1156 }
1157
1158 #if 0
1159 if (reg & TBICR_MR_RESTART_AN) ;
1160 #endif
1161
1162 break;
1163
1164 case TBISR:
1165 panic("TBISR is read only register!\n");
1166
1167 case TANAR:
1168 // Only write the writable bits
1169 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1170 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1171
1172 // Pause capability unimplemented
1173 #if 0
1174 if (reg & TANAR_PS2) ;
1175 if (reg & TANAR_PS1) ;
1176 #endif
1177
1178 break;
1179
1180 case TANLPAR:
1181 panic("this should only be written to by the fake phy!\n");
1182
1183 case TANER:
1184 panic("TANER is read only register!\n");
1185
1186 case TESR:
1187 regs.tesr = reg;
1188 break;
1189
1190 default:
1191 panic("invalid register access daddr=%#x", daddr);
1192 }
1193 } else {
1194 panic("Invalid Request Size");
1195 }
1196
1197 return NoFault;
1198 }
1199
/**
 * Post device-level interrupts: set the given bits in the ISR, bump
 * the per-cause "total" counters for causes that are unmasked, and —
 * if any unmasked interrupt is now pending — schedule a CPU interrupt
 * (delayed by intrDelay unless the cause is in ISR_NODELAY).
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    // Only implemented causes are latched into the ISR.
    interrupts &= ISR_IMPL;
    regs.isr |= interrupts;

    // Count each unmasked cause written; the matching "posted" counters
    // are bumped in devIntrClear when the kernel services the ISR.
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1250
/* Because the interrupt-counting stats live inside this function, it is
   now limited to being used to clear all interrupts when the kernel
   reads and services the ISR. Just telling you in case you were
   thinking of expanding its use. */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Each cause that was pending and unmasked at service time counts
    // as having been posted (pairs with the "total" counters bumped in
    // devIntrPost; the ratio gives the coalescing stats).
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    if (regs.isr & regs.imr & ISR_IMPL)
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // Deassert the CPU interrupt line once nothing unmasked is pending.
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1300
1301 void
1302 NSGigE::devIntrChangeMask()
1303 {
1304 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1305 regs.isr, regs.imr, regs.isr & regs.imr);
1306
1307 if (regs.isr & regs.imr)
1308 cpuIntrPost(curTick);
1309 else
1310 cpuIntrClear();
1311 }
1312
/**
 * Schedule delivery of an interrupt to the CPU at tick @p when,
 * coalescing with any interrupt already scheduled for an earlier tick.
 *
 * @param when tick at which the interrupt should fire; must not be in
 *             the past.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    // intrTick == 0 means "no interrupt currently scheduled".
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp; the asserts above should make this unreachable.
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any not-yet-fired event with one for the (earlier) tick.
    // NOTE(review): squash() presumably marks the old event dead rather
    // than descheduling it -- confirm against the Event API.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1347
1348 void
1349 NSGigE::cpuInterrupt()
1350 {
1351 assert(intrTick == curTick);
1352
1353 // Whether or not there's a pending interrupt, we don't care about
1354 // it anymore
1355 intrEvent = 0;
1356 intrTick = 0;
1357
1358 // Don't send an interrupt if there's already one
1359 if (cpuPendingIntr) {
1360 DPRINTF(EthernetIntr,
1361 "would send an interrupt now, but there's already pending\n");
1362 } else {
1363 // Send interrupt
1364 cpuPendingIntr = true;
1365
1366 DPRINTF(EthernetIntr, "posting interrupt\n");
1367 intrPost();
1368 }
1369 }
1370
1371 void
1372 NSGigE::cpuIntrClear()
1373 {
1374 if (!cpuPendingIntr)
1375 return;
1376
1377 if (intrEvent) {
1378 intrEvent->squash();
1379 intrEvent = 0;
1380 }
1381
1382 intrTick = 0;
1383
1384 cpuPendingIntr = false;
1385
1386 DPRINTF(EthernetIntr, "clearing interrupt\n");
1387 intrClear();
1388 }
1389
1390 bool
1391 NSGigE::cpuIntrPending() const
1392 { return cpuPendingIntr; }
1393
1394 void
1395 NSGigE::txReset()
1396 {
1397
1398 DPRINTF(Ethernet, "transmit reset\n");
1399
1400 CTDD = false;
1401 txEnable = false;;
1402 txFragPtr = 0;
1403 assert(txDescCnt == 0);
1404 txFifo.clear();
1405 txState = txIdle;
1406 assert(txDmaState == dmaIdle);
1407 }
1408
1409 void
1410 NSGigE::rxReset()
1411 {
1412 DPRINTF(Ethernet, "receive reset\n");
1413
1414 CRDD = false;
1415 assert(rxPktBytes == 0);
1416 rxEnable = false;
1417 rxFragPtr = 0;
1418 assert(rxDescCnt == 0);
1419 assert(rxDmaState == dmaIdle);
1420 rxFifo.clear();
1421 rxState = rxIdle;
1422 }
1423
/**
 * Restore every device register and receive-filter flag to its
 * power-on default.  Registers not explicitly assigned below reset
 * to zero via the memset.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    extstsEnable = false;

    // Receive-filter accept flags all start cleared.
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1446
1447 void
1448 NSGigE::rxDmaReadCopy()
1449 {
1450 assert(rxDmaState == dmaReading);
1451
1452 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1453 rxDmaState = dmaIdle;
1454
1455 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1456 rxDmaAddr, rxDmaLen);
1457 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1458 }
1459
1460 bool
1461 NSGigE::doRxDmaRead()
1462 {
1463 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1464 rxDmaState = dmaReading;
1465
1466 if (dmaInterface && !rxDmaFree) {
1467 if (dmaInterface->busy())
1468 rxDmaState = dmaReadWaiting;
1469 else
1470 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1471 &rxDmaReadEvent, true);
1472 return true;
1473 }
1474
1475 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1476 rxDmaReadCopy();
1477 return false;
1478 }
1479
1480 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1481 Tick start = curTick + dmaReadDelay + factor;
1482 rxDmaReadEvent.schedule(start);
1483 return true;
1484 }
1485
1486 void
1487 NSGigE::rxDmaReadDone()
1488 {
1489 assert(rxDmaState == dmaReading);
1490 rxDmaReadCopy();
1491
1492 // If the transmit state machine has a pending DMA, let it go first
1493 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1494 txKick();
1495
1496 rxKick();
1497 }
1498
1499 void
1500 NSGigE::rxDmaWriteCopy()
1501 {
1502 assert(rxDmaState == dmaWriting);
1503
1504 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1505 rxDmaState = dmaIdle;
1506
1507 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1508 rxDmaAddr, rxDmaLen);
1509 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1510 }
1511
1512 bool
1513 NSGigE::doRxDmaWrite()
1514 {
1515 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1516 rxDmaState = dmaWriting;
1517
1518 if (dmaInterface && !rxDmaFree) {
1519 if (dmaInterface->busy())
1520 rxDmaState = dmaWriteWaiting;
1521 else
1522 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1523 &rxDmaWriteEvent, true);
1524 return true;
1525 }
1526
1527 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1528 rxDmaWriteCopy();
1529 return false;
1530 }
1531
1532 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1533 Tick start = curTick + dmaWriteDelay + factor;
1534 rxDmaWriteEvent.schedule(start);
1535 return true;
1536 }
1537
1538 void
1539 NSGigE::rxDmaWriteDone()
1540 {
1541 assert(rxDmaState == dmaWriting);
1542 rxDmaWriteCopy();
1543
1544 // If the transmit state machine has a pending DMA, let it go first
1545 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1546 txKick();
1547
1548 rxKick();
1549 }
1550
/**
 * Receive-side state machine.  Loops via the "next" label through
 * descriptor read, FIFO drain, fragment write, and descriptor
 * writeback; jumps to "exit" whenever it must wait for an in-flight
 * DMA (whose completion event re-enters this function).
 */
void
NSGigE::rxKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM,
            "receive kick rxState=%s (rxBuf.size=%d) %d-bit\n",
            NsRxStateStrings[rxState], rxFifo.size(), is64bit ? 64 : 32);

    Addr link, bufptr;
    // References pick the active descriptor flavor once, up front.
    uint32_t &cmdsts = is64bit ? rxDesc64.cmdsts : rxDesc32.cmdsts;
    uint32_t &extsts = is64bit ? rxDesc64.extsts : rxDesc32.extsts;

  next:
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was left waiting on a busy bus.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)rxDesc64.link : (Addr)rxDesc32.link;
    bufptr = is64bit ? (Addr)rxDesc64.bufptr : (Addr)rxDesc32.bufptr;

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor already done: refresh only its link field.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData =
                is64bit ? (void *)&rxDesc64.link : (void *)&rxDesc32.link;
            rxDmaLen = is64bit ? sizeof(rxDesc64.link) : sizeof(rxDesc32.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor at regs.rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDesc: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                link, bufptr, cmdsts, extsts);

        // OWN set means the descriptor still belongs to the device's
        // consumer side (no buffer available): go idle.
        if (cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = bufptr;
            rxDescCnt = cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            // Hand the descriptor back: OWN + OK, clear MORE, and put
            // the packet length in the low 16 bits (CMDSTS_SIZE).
            cmdsts |= CMDSTS_OWN;
            cmdsts &= ~CMDSTS_MORE;
            cmdsts |= CMDSTS_OK;
            cmdsts &= 0xffff0000;
            cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Receive checksum offload: record packet type and verify
            // IP/TCP/UDP checksums into extsts when enabled.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDesc: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDesc: link=%#x bufptr=%#x cmdsts=%08x extsts=%08x\n",
                    link, bufptr, cmdsts, extsts);

            // Write back cmdsts and extsts (adjacent fields) in one DMA.
            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &cmdsts;
            if (is64bit) {
                rxDmaAddr += offsetof(ns_desc64, cmdsts);
                rxDmaLen = sizeof(rxDesc64.cmdsts) + sizeof(rxDesc64.extsts);
            } else {
                rxDmaAddr += offsetof(ns_desc32, cmdsts);
                rxDmaLen = sizeof(rxDesc32.cmdsts) + sizeof(rxDesc32.extsts);
            }
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment just written to memory.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // link == 0 marks the end of the descriptor chain.
        if (link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            if (rxDmaState != dmaIdle)
                goto exit;
            rxState = rxDescRead;
            regs.rxdp = link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = is64bit ? (void *)&rxDesc64 : (void *)&rxDesc32;
            rxDmaLen = is64bit ? sizeof(rxDesc64) : sizeof(rxDesc32);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1887
/**
 * Try to send the packet at the head of the transmit fifo out on the
 * wire.  On success, update tx statistics, pop the fifo, and post
 * ISR_TXOK.  If packets remain (e.g. the interface was busy), schedule
 * a retry.
 */
void
NSGigE::transmit()
{
    if (txFifo.empty()) {
        DPRINTF(Ethernet, "nothing to transmit\n");
        return;
    }

    DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
            txFifo.size());
    if (interface->sendPacket(txFifo.front())) {
#if TRACING_ON
        if (DTRACE(Ethernet)) {
            IpPtr ip(txFifo.front());
            if (ip) {
                DPRINTF(Ethernet, "ID is %d\n", ip->id());
                TcpPtr tcp(ip);
                if (tcp) {
                    DPRINTF(Ethernet,
                            "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                            tcp->sport(), tcp->dport(), tcp->seq(),
                            tcp->ack());
                }
            }
        }
#endif

        DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
        txBytes += txFifo.front()->length;
        txPackets++;

        DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
                txFifo.avail());
        txFifo.pop();

        /*
         * normally do a writeback of the descriptor here, and ONLY
         * after that is done, send this interrupt. but since our
         * stuff never actually fails, just do this interrupt here,
         * otherwise the code has to stray from this nice format.
         * besides, it's functionally the same.
         */
        devIntrPost(ISR_TXOK);
    }

    // Retry later if there is still data queued and no attempt pending.
    if (!txFifo.empty() && !txEvent.scheduled()) {
        DPRINTF(Ethernet, "reschedule transmit\n");
        txEvent.schedule(curTick + retryTime);
    }
}
1938
1939 void
1940 NSGigE::txDmaReadCopy()
1941 {
1942 assert(txDmaState == dmaReading);
1943
1944 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1945 txDmaState = dmaIdle;
1946
1947 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1948 txDmaAddr, txDmaLen);
1949 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1950 }
1951
1952 bool
1953 NSGigE::doTxDmaRead()
1954 {
1955 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1956 txDmaState = dmaReading;
1957
1958 if (dmaInterface && !txDmaFree) {
1959 if (dmaInterface->busy())
1960 txDmaState = dmaReadWaiting;
1961 else
1962 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1963 &txDmaReadEvent, true);
1964 return true;
1965 }
1966
1967 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1968 txDmaReadCopy();
1969 return false;
1970 }
1971
1972 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1973 Tick start = curTick + dmaReadDelay + factor;
1974 txDmaReadEvent.schedule(start);
1975 return true;
1976 }
1977
1978 void
1979 NSGigE::txDmaReadDone()
1980 {
1981 assert(txDmaState == dmaReading);
1982 txDmaReadCopy();
1983
1984 // If the receive state machine has a pending DMA, let it go first
1985 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1986 rxKick();
1987
1988 txKick();
1989 }
1990
1991 void
1992 NSGigE::txDmaWriteCopy()
1993 {
1994 assert(txDmaState == dmaWriting);
1995
1996 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1997 txDmaState = dmaIdle;
1998
1999 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
2000 txDmaAddr, txDmaLen);
2001 DDUMP(EthernetDMA, txDmaData, txDmaLen);
2002 }
2003
2004 bool
2005 NSGigE::doTxDmaWrite()
2006 {
2007 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
2008 txDmaState = dmaWriting;
2009
2010 if (dmaInterface && !txDmaFree) {
2011 if (dmaInterface->busy())
2012 txDmaState = dmaWriteWaiting;
2013 else
2014 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
2015 &txDmaWriteEvent, true);
2016 return true;
2017 }
2018
2019 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
2020 txDmaWriteCopy();
2021 return false;
2022 }
2023
2024 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
2025 Tick start = curTick + dmaWriteDelay + factor;
2026 txDmaWriteEvent.schedule(start);
2027 return true;
2028 }
2029
2030 void
2031 NSGigE::txDmaWriteDone()
2032 {
2033 assert(txDmaState == dmaWriting);
2034 txDmaWriteCopy();
2035
2036 // If the receive state machine has a pending DMA, let it go first
2037 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
2038 rxKick();
2039
2040 txKick();
2041 }
2042
/**
 * Transmit-side state machine.  Loops via the "next" label through
 * descriptor read, fifo fill, fragment read, and descriptor writeback;
 * jumps to "exit" whenever it must wait for an in-flight DMA (whose
 * completion event re-enters this function).
 */
void
NSGigE::txKick()
{
    bool is64bit = (bool)(regs.config & CFGR_M64ADDR);

    DPRINTF(EthernetSM, "transmit kick txState=%s %d-bit\n",
            NsTxStateStrings[txState], is64bit ? 64 : 32);

    Addr link, bufptr;
    // References pick the active descriptor flavor once, up front.
    uint32_t &cmdsts = is64bit ? txDesc64.cmdsts : txDesc32.cmdsts;
    uint32_t &extsts = is64bit ? txDesc64.extsts : txDesc32.extsts;

  next:
    if (clock) {
        if (txKickTick > curTick) {
            DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                    txKickTick);
            goto exit;
        }

        // Go to the next state machine clock tick.
        txKickTick = curTick + cycles(1);
    }

    // Retry any DMA that was left waiting on a busy bus.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    link = is64bit ? (Addr)txDesc64.link : (Addr)txDesc32.link;
    bufptr = is64bit ? (Addr)txDesc64.bufptr : (Addr)txDesc32.bufptr;
    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor already done: refresh only its link field.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData =
                is64bit ? (void *)&txDesc64.link : (void *)&txDesc32.link;
            txDmaLen = is64bit ? sizeof(txDesc64.link) : sizeof(txDesc32.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the whole descriptor at regs.txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "txDesc: addr=%08x read descriptor\n",
                regs.txdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "txDesc: link=%#x bufptr=%#x cmdsts=%#08x extsts=%#08x\n",
                link, bufptr, cmdsts, extsts);

        // OWN set means the driver has handed this descriptor to us.
        if (cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = bufptr;
            txDescCnt = cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            // 16384 bytes is comfortably larger than any legal frame.
            txPacket = new PacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (cmdsts & CMDSTS_MORE) {
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                cmdsts &= ~CMDSTS_OWN;

                // Write back only the cmdsts field for this descriptor.
                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen = sizeof(txDesc64.cmdsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen = sizeof(txDesc32.cmdsts);
                }
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Transmit checksum offload, selected per-packet via extsts.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                if (txPacket->length > 1514)
                    panic("transmit packet too large, %s > 1514\n",
                          txPacket->length);

#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                cmdsts &= ~CMDSTS_OWN;
                cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        cmdsts, extsts);

                // Write back cmdsts and extsts (adjacent) in one DMA.
                txDmaFree = dmaDescFree;
                txDmaAddr = regs.txdp & 0x3fffffff;
                txDmaData = &cmdsts;
                if (is64bit) {
                    txDmaAddr += offsetof(ns_desc64, cmdsts);
                    txDmaLen =
                        sizeof(txDesc64.cmdsts) + sizeof(txDesc64.extsts);
                } else {
                    txDmaAddr += offsetof(ns_desc32, cmdsts);
                    txDmaLen =
                        sizeof(txDesc32.cmdsts) + sizeof(txDesc32.extsts);
                }

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: try to drain it onto the wire and retry.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment just read from memory.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        if (cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        if (!txEnable) {
            DPRINTF(EthernetSM, "halting TX state machine\n");
            txState = txIdle;
            goto exit;
        } else
            txState = txAdvance;
        break;

      case txAdvance:
        // link == 0 marks the end of the descriptor chain.
        if (link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            if (txDmaState != dmaIdle)
                goto exit;
            txState = txDescRead;
            regs.txdp = link;
            CTDD = false;

            txDmaAddr = link & 0x3fffffff;
            txDmaData = is64bit ? (void *)&txDesc64 : (void *)&txDesc32;
            txDmaLen = is64bit ? sizeof(txDesc64) : sizeof(txDesc32);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);

    if (clock && !txKickEvent.scheduled())
        txKickEvent.schedule(txKickTick);
}
2358
/**
 * Advance the EEPROM state machine
 * Called on rising edge of EEPROM clock bit in MEAR
 *
 * Bits arrive/leave serially through MEAR's EEDI (in) and EEDO (out)
 * bits, MSB first: start bit, 2-bit opcode, 6-bit address, then 16
 * data bits shifted out.  Only the READ opcode is modelled.
 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift the incoming bit into the opcode, MSB first.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift the incoming bit into the address, MSB first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the three words holding the perfect-match MAC
            // address are backed by data.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2459
2460 void
2461 NSGigE::transferDone()
2462 {
2463 if (txFifo.empty()) {
2464 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2465 return;
2466 }
2467
2468 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2469
2470 if (txEvent.scheduled())
2471 txEvent.reschedule(curTick + cycles(1));
2472 else
2473 txEvent.schedule(curTick + cycles(1));
2474 }
2475
2476 bool
2477 NSGigE::rxFilter(const PacketPtr &packet)
2478 {
2479 EthPtr eth = packet;
2480 bool drop = true;
2481 string type;
2482
2483 const EthAddr &dst = eth->dst();
2484 if (dst.unicast()) {
2485 // If we're accepting all unicast addresses
2486 if (acceptUnicast)
2487 drop = false;
2488
2489 // If we make a perfect match
2490 if (acceptPerfect && dst == rom.perfectMatch)
2491 drop = false;
2492
2493 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2494 drop = false;
2495
2496 } else if (dst.broadcast()) {
2497 // if we're accepting broadcasts
2498 if (acceptBroadcast)
2499 drop = false;
2500
2501 } else if (dst.multicast()) {
2502 // if we're accepting all multicasts
2503 if (acceptMulticast)
2504 drop = false;
2505
2506 // Multicast hashing faked - all packets accepted
2507 if (multicastHashEnable)
2508 drop = false;
2509 }
2510
2511 if (drop) {
2512 DPRINTF(Ethernet, "rxFilter drop\n");
2513 DDUMP(EthernetData, packet->data, packet->length);
2514 }
2515
2516 return drop;
2517 }
2518
/**
 * Entry point for a packet arriving from the wire.  Always counts the
 * packet, then drops it when reception is disabled, filtering is
 * disabled, or the filter rejects it (returning true in all of those
 * cases).  Returns false only on rx fifo overflow, after posting
 * ISR_RXORN.
 *
 * NOTE(review): the false return presumably tells the link layer to
 * retry delivery later -- confirm against the EtherInt interface.
 */
bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        return true;
    }

    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        return true;
    }

    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);

    // Let the receive state machine start consuming the new data.
    rxKick();
    return true;
}
2567
//=====================================================================
//
// Checkpoint serialization / unserialization
//
void
NSGigE::serialize(ostream &os)
{
    // Write the complete device state to a checkpoint stream.  The
    // scalars written here are read back by unserialize() below; the
    // two functions must stay in sync field-for-field.

    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     * Completing in-flight DMA copies up front means no DMA event needs
     * to be represented in the checkpoint itself.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // EEPROM-loaded receive filter state: perfect-match address and hash table.
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // The packet pointers themselves can't be checkpointed, so record
    // whether each exists, and if so its contents plus the buffer-pointer
    // offset (stored relative to the packet data so it survives restore).
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // NOTE(review): mutates the packet's length field during
        // serialization so the stored length matches the bytes copied so far.
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize Cached Descriptors
     */
    SERIALIZE_SCALAR(rxDesc64.link);
    SERIALIZE_SCALAR(rxDesc64.bufptr);
    SERIALIZE_SCALAR(rxDesc64.cmdsts);
    SERIALIZE_SCALAR(rxDesc64.extsts);
    SERIALIZE_SCALAR(txDesc64.link);
    SERIALIZE_SCALAR(txDesc64.bufptr);
    SERIALIZE_SCALAR(txDesc64.cmdsts);
    SERIALIZE_SCALAR(txDesc64.extsts);
    SERIALIZE_SCALAR(rxDesc32.link);
    SERIALIZE_SCALAR(rxDesc32.bufptr);
    SERIALIZE_SCALAR(rxDesc32.cmdsts);
    SERIALIZE_SCALAR(rxDesc32.extsts);
    SERIALIZE_SCALAR(txDesc32.link);
    SERIALIZE_SCALAR(txDesc32.bufptr);
    SERIALIZE_SCALAR(txDesc32.cmdsts);
    SERIALIZE_SCALAR(txDesc32.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine
     */
    // Enum states are widened to int locals because SERIALIZE_SCALAR
    // stores the variable under the local's name.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick so it can be rescheduled as an offset
    // from the restore-time tick.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    // Unlike transmitTick, the interrupt time is stored as an absolute
    // tick; 0 means no interrupt event is outstanding.
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2749
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Restore the complete device state from a checkpoint.  The reads
    // here mirror the writes in serialize() above, field-for-field.

    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.brar);
    UNSERIALIZE_SCALAR(regs.brdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // Rebuild the in-flight packets (if any existed at checkpoint time)
    // and recompute the buffer pointers from their stored offsets.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // 16384-byte buffer -- presumably the device's maximum packet
        // size; TODO(review) confirm against the rest of the model.
        txPacket = new PacketData(16384);
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    // NOTE(review): this assignment is redundant -- both branches of the
    // if/else below also set rxPacket.
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData(16384);
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize Cached Descriptors
     */
    UNSERIALIZE_SCALAR(rxDesc64.link);
    UNSERIALIZE_SCALAR(rxDesc64.bufptr);
    UNSERIALIZE_SCALAR(rxDesc64.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc64.extsts);
    UNSERIALIZE_SCALAR(txDesc64.link);
    UNSERIALIZE_SCALAR(txDesc64.bufptr);
    UNSERIALIZE_SCALAR(txDesc64.cmdsts);
    UNSERIALIZE_SCALAR(txDesc64.extsts);
    UNSERIALIZE_SCALAR(rxDesc32.link);
    UNSERIALIZE_SCALAR(rxDesc32.bufptr);
    UNSERIALIZE_SCALAR(rxDesc32.cmdsts);
    UNSERIALIZE_SCALAR(rxDesc32.extsts);
    UNSERIALIZE_SCALAR(txDesc32.link);
    UNSERIALIZE_SCALAR(txDesc32.bufptr);
    UNSERIALIZE_SCALAR(txDesc32.cmdsts);
    UNSERIALIZE_SCALAR(txDesc32.extsts);
    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * unserialize tx state machine
     */
    // State values were stored as ints (see serialize()); cast them back
    // to their enum types here.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;
    UNSERIALIZE_SCALAR(txKickTick);
    // A nonzero kick tick means the state machine was active; reschedule it.
    if (txKickTick)
        txKickEvent.schedule(txKickTick);

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;
    UNSERIALIZE_SCALAR(rxKickTick);
    if (rxKickTick)
        rxKickEvent.schedule(rxKickTick);

    /*
     * Unserialize EEPROM state machine
     */
    int eepromState;
    UNSERIALIZE_SCALAR(eepromState);
    this->eepromState = (EEPROMState) eepromState;
    UNSERIALIZE_SCALAR(eepromClk);
    UNSERIALIZE_SCALAR(eepromBitsToRx);
    UNSERIALIZE_SCALAR(eepromOpcode);
    UNSERIALIZE_SCALAR(eepromAddress);
    UNSERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was stored relative to the checkpoint's curTick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);
    UNSERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.
     */
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    // Recreate the pending interrupt event if one was outstanding
    // (intrEventTick is an absolute tick; 0 means none).
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2938
2939 Tick
2940 NSGigE::cacheAccess(MemReqPtr &req)
2941 {
2942 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2943 req->paddr, req->paddr & 0xfff);
2944
2945 return curTick + pioLatency;
2946 }
2947
// Configuration parameters for the NSGigEInt link-interface SimObject.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // interface at the other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // NIC this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2954
// Parameter descriptions and defaults for NSGigEInt.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    // peer defaults to NULL; CREATE_SIM_OBJECT below checks before linking.
    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2961
2962 CREATE_SIM_OBJECT(NSGigEInt)
2963 {
2964 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2965
2966 EtherInt *p = (EtherInt *)peer;
2967 if (p) {
2968 dev_int->setPeer(p);
2969 p->setPeer(dev_int);
2970 }
2971
2972 return dev_int;
2973 }
2974
// Register the "NSGigEInt" type with the simulator's object factory.
REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2976
2977
// Configuration parameters for the NSGigE device model.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Tick> clock;                          // state machine clock

    // PCI device plumbing
    Param<Addr> addr;
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // Bus attachment and DMA timing model
    SimObjectParam<HierParams *> hier;
    SimObjectParam<Bus*> pio_bus;
    SimObjectParam<Bus*> dma_bus;
    SimObjectParam<Bus*> payload_bus;
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    Param<bool> dma_no_allocate;
    Param<Tick> pio_latency;
    Param<Tick> intr_delay;

    // Link delays and FIFO sizing
    Param<Tick> rx_delay;
    Param<Tick> tx_delay;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> tx_fifo_size;

    // Receive filtering / addressing / threading hints
    Param<bool> rx_filter;
    Param<string> hardware_address;
    Param<bool> rx_thread;
    Param<bool> tx_thread;
    Param<bool> rss;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
3018
// Parameter descriptions and defaults for NSGigE.  Order must match the
// declarations above.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(clock, "State machine processor frequency"),

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),

    INIT_PARAM(hier, "Hierarchy global variables"),
    // NOTE(review): several help strings below are empty -- worth filling in.
    INIT_PARAM(pio_bus, ""),
    INIT_PARAM(dma_bus, ""),
    INIT_PARAM(payload_bus, "The IO Bus to attach to for payload"),
    INIT_PARAM(dma_desc_free, "DMA of Descriptors is free"),
    INIT_PARAM(dma_data_free, "DMA of Data is free"),
    INIT_PARAM(dma_read_delay, "fixed delay for dma reads"),
    INIT_PARAM(dma_write_delay, "fixed delay for dma writes"),
    INIT_PARAM(dma_read_factor, "multiplier for dma reads"),
    INIT_PARAM(dma_write_factor, "multiplier for dma writes"),
    INIT_PARAM(dma_no_allocate, "Should DMA reads allocate cache lines"),
    INIT_PARAM(pio_latency, "Programmed IO latency in bus cycles"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),

    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_fifo_size, "max size in bytes of rxFifo"),
    INIT_PARAM(tx_fifo_size, "max size in bytes of txFifo"),

    INIT_PARAM(rx_filter, "Enable Receive Filter"),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM(rx_thread, ""),
    INIT_PARAM(tx_thread, ""),
    INIT_PARAM(rss, "")

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
3059
3060
3061 CREATE_SIM_OBJECT(NSGigE)
3062 {
3063 NSGigE::Params *params = new NSGigE::Params;
3064
3065 params->name = getInstanceName();
3066
3067 params->clock = clock;
3068
3069 params->mmu = mmu;
3070 params->pmem = physmem;
3071 params->configSpace = configspace;
3072 params->configData = configdata;
3073 params->plat = platform;
3074 params->busNum = pci_bus;
3075 params->deviceNum = pci_dev;
3076 params->functionNum = pci_func;
3077
3078 params->hier = hier;
3079 params->pio_bus = pio_bus;
3080 params->header_bus = dma_bus;
3081 params->payload_bus = payload_bus;
3082 params->dma_desc_free = dma_desc_free;
3083 params->dma_data_free = dma_data_free;
3084 params->dma_read_delay = dma_read_delay;
3085 params->dma_write_delay = dma_write_delay;
3086 params->dma_read_factor = dma_read_factor;
3087 params->dma_write_factor = dma_write_factor;
3088 params->dma_no_allocate = dma_no_allocate;
3089 params->pio_latency = pio_latency;
3090 params->intr_delay = intr_delay;
3091
3092 params->rx_delay = rx_delay;
3093 params->tx_delay = tx_delay;
3094 params->rx_fifo_size = rx_fifo_size;
3095 params->tx_fifo_size = tx_fifo_size;
3096
3097 params->rx_filter = rx_filter;
3098 params->eaddr = hardware_address;
3099 params->rx_thread = rx_thread;
3100 params->tx_thread = tx_thread;
3101 params->rss = rss;
3102
3103 return new NSGigE(params);
3104 }
3105
// Register the "NSGigE" type with the simulator's object factory.
REGISTER_SIM_OBJECT("NSGigE", NSGigE)