Tweak the set of coalesced interrupts
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /** @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/etherlink.hh"
40 #include "dev/ns_gige.hh"
41 #include "dev/pciconfigall.hh"
42 #include "mem/bus/bus.hh"
43 #include "mem/bus/dma_interface.hh"
44 #include "mem/bus/pio_interface.hh"
45 #include "mem/bus/pio_interface_impl.hh"
46 #include "mem/functional/memory_control.hh"
47 #include "mem/functional/physical.hh"
48 #include "sim/builder.hh"
49 #include "sim/debug.hh"
50 #include "sim/host.hh"
51 #include "sim/stats.hh"
52 #include "targetarch/vtophys.hh"
53
// Human-readable names for the receive state machine, indexed by the
// RxState enum (see ns_gige.hh); used only for debug/trace output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
64
// Human-readable names for the transmit state machine, indexed by the
// TxState enum (see ns_gige.hh); used only for debug/trace output.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
75
// Human-readable names for the DMA engine states, indexed by the
// DmaState enum (see ns_gige.hh); used only for debug/trace output.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
84
85 using namespace std;
86 using namespace Net;
87
88 ///////////////////////////////////////////////////////////////////////
89 //
90 // NSGigE PCI Device
91 //
/**
 * Construct the device model.  All state machines start idle/disabled;
 * the PIO and DMA bus interfaces are wired up according to which buses
 * were supplied in the parameters:
 *  - header_bus only:           PIO and DMA both on header_bus
 *  - header_bus + payload_bus:  PIO on header_bus, DMA split across both
 *  - payload_bus only:          PIO and DMA both on payload_bus
 *  - neither:                   no bus interfaces (functional access only
 *                               -- presumably; TODO confirm with callers)
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), clock(p->clock),
      txState(txIdle), txEnable(false), CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      eepromState(eepromStart), rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), rxKickEvent(this), txKickTick(0), txKickEvent(this),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false), multicastHashEnable(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    if (p->header_bus) {
        // Register PIO (register file) accesses on the header bus.
        pioInterface = newPioInterface(name() + ".pio", p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRate;

        if (p->payload_bus)
            // Descriptor traffic on header bus, packet data on payload bus.
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1,
                                                 p->dma_no_allocate);
        else
            // Single bus carries both descriptor and packet-data DMA.
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1,
                                                 p->dma_no_allocate);
    } else if (p->payload_bus) {
        // No header bus: put everything on the payload bus.
        pioInterface = newPioInterface(name() + ".pio2", p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRate;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1,
                                             p->dma_no_allocate);
    }


    // Timing knobs for the modelled DMA engine and interrupt delivery.
    intrDelay = p->intr_delay;
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    regsReset();
    // Seed the perfect-match filter ROM with the configured MAC address.
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}
152
// Destructor: nothing to tear down explicitly here.  (NOTE(review): the
// interfaces allocated with new in the constructor are not deleted --
// presumably acceptable for a simulator object that lives for the whole
// run; confirm if this model is ever destroyed mid-simulation.)
NSGigE::~NSGigE()
{}
155
156 void
157 NSGigE::regStats()
158 {
159 txBytes
160 .name(name() + ".txBytes")
161 .desc("Bytes Transmitted")
162 .prereq(txBytes)
163 ;
164
165 rxBytes
166 .name(name() + ".rxBytes")
167 .desc("Bytes Received")
168 .prereq(rxBytes)
169 ;
170
171 txPackets
172 .name(name() + ".txPackets")
173 .desc("Number of Packets Transmitted")
174 .prereq(txBytes)
175 ;
176
177 rxPackets
178 .name(name() + ".rxPackets")
179 .desc("Number of Packets Received")
180 .prereq(rxBytes)
181 ;
182
183 txIpChecksums
184 .name(name() + ".txIpChecksums")
185 .desc("Number of tx IP Checksums done by device")
186 .precision(0)
187 .prereq(txBytes)
188 ;
189
190 rxIpChecksums
191 .name(name() + ".rxIpChecksums")
192 .desc("Number of rx IP Checksums done by device")
193 .precision(0)
194 .prereq(rxBytes)
195 ;
196
197 txTcpChecksums
198 .name(name() + ".txTcpChecksums")
199 .desc("Number of tx TCP Checksums done by device")
200 .precision(0)
201 .prereq(txBytes)
202 ;
203
204 rxTcpChecksums
205 .name(name() + ".rxTcpChecksums")
206 .desc("Number of rx TCP Checksums done by device")
207 .precision(0)
208 .prereq(rxBytes)
209 ;
210
211 txUdpChecksums
212 .name(name() + ".txUdpChecksums")
213 .desc("Number of tx UDP Checksums done by device")
214 .precision(0)
215 .prereq(txBytes)
216 ;
217
218 rxUdpChecksums
219 .name(name() + ".rxUdpChecksums")
220 .desc("Number of rx UDP Checksums done by device")
221 .precision(0)
222 .prereq(rxBytes)
223 ;
224
225 descDmaReads
226 .name(name() + ".descDMAReads")
227 .desc("Number of descriptors the device read w/ DMA")
228 .precision(0)
229 ;
230
231 descDmaWrites
232 .name(name() + ".descDMAWrites")
233 .desc("Number of descriptors the device wrote w/ DMA")
234 .precision(0)
235 ;
236
237 descDmaRdBytes
238 .name(name() + ".descDmaReadBytes")
239 .desc("number of descriptor bytes read w/ DMA")
240 .precision(0)
241 ;
242
243 descDmaWrBytes
244 .name(name() + ".descDmaWriteBytes")
245 .desc("number of descriptor bytes write w/ DMA")
246 .precision(0)
247 ;
248
249 txBandwidth
250 .name(name() + ".txBandwidth")
251 .desc("Transmit Bandwidth (bits/s)")
252 .precision(0)
253 .prereq(txBytes)
254 ;
255
256 rxBandwidth
257 .name(name() + ".rxBandwidth")
258 .desc("Receive Bandwidth (bits/s)")
259 .precision(0)
260 .prereq(rxBytes)
261 ;
262
263 totBandwidth
264 .name(name() + ".totBandwidth")
265 .desc("Total Bandwidth (bits/s)")
266 .precision(0)
267 .prereq(totBytes)
268 ;
269
270 totPackets
271 .name(name() + ".totPackets")
272 .desc("Total Packets")
273 .precision(0)
274 .prereq(totBytes)
275 ;
276
277 totBytes
278 .name(name() + ".totBytes")
279 .desc("Total Bytes")
280 .precision(0)
281 .prereq(totBytes)
282 ;
283
284 totPacketRate
285 .name(name() + ".totPPS")
286 .desc("Total Tranmission Rate (packets/s)")
287 .precision(0)
288 .prereq(totBytes)
289 ;
290
291 txPacketRate
292 .name(name() + ".txPPS")
293 .desc("Packet Tranmission Rate (packets/s)")
294 .precision(0)
295 .prereq(txBytes)
296 ;
297
298 rxPacketRate
299 .name(name() + ".rxPPS")
300 .desc("Packet Reception Rate (packets/s)")
301 .precision(0)
302 .prereq(rxBytes)
303 ;
304
305 postedSwi
306 .name(name() + ".postedSwi")
307 .desc("number of software interrupts posted to CPU")
308 .precision(0)
309 ;
310
311 totalSwi
312 .name(name() + ".totalSwi")
313 .desc("number of total Swi written to ISR")
314 .precision(0)
315 ;
316
317 coalescedSwi
318 .name(name() + ".coalescedSwi")
319 .desc("average number of Swi's coalesced into each post")
320 .precision(0)
321 ;
322
323 postedRxIdle
324 .name(name() + ".postedRxIdle")
325 .desc("number of rxIdle interrupts posted to CPU")
326 .precision(0)
327 ;
328
329 totalRxIdle
330 .name(name() + ".totalRxIdle")
331 .desc("number of total RxIdle written to ISR")
332 .precision(0)
333 ;
334
335 coalescedRxIdle
336 .name(name() + ".coalescedRxIdle")
337 .desc("average number of RxIdle's coalesced into each post")
338 .precision(0)
339 ;
340
341 postedRxOk
342 .name(name() + ".postedRxOk")
343 .desc("number of RxOk interrupts posted to CPU")
344 .precision(0)
345 ;
346
347 totalRxOk
348 .name(name() + ".totalRxOk")
349 .desc("number of total RxOk written to ISR")
350 .precision(0)
351 ;
352
353 coalescedRxOk
354 .name(name() + ".coalescedRxOk")
355 .desc("average number of RxOk's coalesced into each post")
356 .precision(0)
357 ;
358
359 postedRxDesc
360 .name(name() + ".postedRxDesc")
361 .desc("number of RxDesc interrupts posted to CPU")
362 .precision(0)
363 ;
364
365 totalRxDesc
366 .name(name() + ".totalRxDesc")
367 .desc("number of total RxDesc written to ISR")
368 .precision(0)
369 ;
370
371 coalescedRxDesc
372 .name(name() + ".coalescedRxDesc")
373 .desc("average number of RxDesc's coalesced into each post")
374 .precision(0)
375 ;
376
377 postedTxOk
378 .name(name() + ".postedTxOk")
379 .desc("number of TxOk interrupts posted to CPU")
380 .precision(0)
381 ;
382
383 totalTxOk
384 .name(name() + ".totalTxOk")
385 .desc("number of total TxOk written to ISR")
386 .precision(0)
387 ;
388
389 coalescedTxOk
390 .name(name() + ".coalescedTxOk")
391 .desc("average number of TxOk's coalesced into each post")
392 .precision(0)
393 ;
394
395 postedTxIdle
396 .name(name() + ".postedTxIdle")
397 .desc("number of TxIdle interrupts posted to CPU")
398 .precision(0)
399 ;
400
401 totalTxIdle
402 .name(name() + ".totalTxIdle")
403 .desc("number of total TxIdle written to ISR")
404 .precision(0)
405 ;
406
407 coalescedTxIdle
408 .name(name() + ".coalescedTxIdle")
409 .desc("average number of TxIdle's coalesced into each post")
410 .precision(0)
411 ;
412
413 postedTxDesc
414 .name(name() + ".postedTxDesc")
415 .desc("number of TxDesc interrupts posted to CPU")
416 .precision(0)
417 ;
418
419 totalTxDesc
420 .name(name() + ".totalTxDesc")
421 .desc("number of total TxDesc written to ISR")
422 .precision(0)
423 ;
424
425 coalescedTxDesc
426 .name(name() + ".coalescedTxDesc")
427 .desc("average number of TxDesc's coalesced into each post")
428 .precision(0)
429 ;
430
431 postedRxOrn
432 .name(name() + ".postedRxOrn")
433 .desc("number of RxOrn posted to CPU")
434 .precision(0)
435 ;
436
437 totalRxOrn
438 .name(name() + ".totalRxOrn")
439 .desc("number of total RxOrn written to ISR")
440 .precision(0)
441 ;
442
443 coalescedRxOrn
444 .name(name() + ".coalescedRxOrn")
445 .desc("average number of RxOrn's coalesced into each post")
446 .precision(0)
447 ;
448
449 coalescedTotal
450 .name(name() + ".coalescedTotal")
451 .desc("average number of interrupts coalesced into each post")
452 .precision(0)
453 ;
454
455 postedInterrupts
456 .name(name() + ".postedInterrupts")
457 .desc("number of posts to CPU")
458 .precision(0)
459 ;
460
461 droppedPackets
462 .name(name() + ".droppedPackets")
463 .desc("number of packets dropped")
464 .precision(0)
465 ;
466
467 coalescedSwi = totalSwi / postedInterrupts;
468 coalescedRxIdle = totalRxIdle / postedInterrupts;
469 coalescedRxOk = totalRxOk / postedInterrupts;
470 coalescedRxDesc = totalRxDesc / postedInterrupts;
471 coalescedTxOk = totalTxOk / postedInterrupts;
472 coalescedTxIdle = totalTxIdle / postedInterrupts;
473 coalescedTxDesc = totalTxDesc / postedInterrupts;
474 coalescedRxOrn = totalRxOrn / postedInterrupts;
475
476 coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc +
477 totalTxOk + totalTxIdle + totalTxDesc +
478 totalRxOrn) / postedInterrupts;
479
480 txBandwidth = txBytes * Stats::constant(8) / simSeconds;
481 rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
482 totBandwidth = txBandwidth + rxBandwidth;
483 totBytes = txBytes + rxBytes;
484 totPackets = txPackets + rxPackets;
485
486 txPacketRate = txPackets / simSeconds;
487 rxPacketRate = rxPackets / simSeconds;
488 }
489
490 /**
491 * This is to read the PCI general configuration registers
492 */
493 void
494 NSGigE::readConfig(int offset, int size, uint8_t *data)
495 {
496 if (offset < PCI_DEVICE_SPECIFIC)
497 PciDev::readConfig(offset, size, data);
498 else
499 panic("Device specific PCI config space not implemented!\n");
500 }
501
/**
 * Write to the PCI general configuration registers.  The standard
 * header write is delegated to PciDev; afterwards, writes to
 * PCI_COMMAND and the two BARs are intercepted to keep this model's
 * state (ioEnable, PIO address ranges) in sync with the config space.
 */
void
NSGigE::writeConfig(int offset, int size, const uint8_t* data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::writeConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track the I/O Space Enable bit; the read()/write() handlers
        // assert on ioEnable before servicing any access.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        // A nonzero BAR write maps the register file: publish the new
        // address range on the PIO bus and strip the uncached-space
        // bits (Alpha EV5 physical address convention).
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
559
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.  Only 32-bit reads are supported.  Note the read side
 * effects: reading CR clears the transient command bits, reading ISR
 * clears all implemented interrupts, and reading MIBC clears its
 * self-clearing control bits.
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // Accesses above the reserved window alias the PCI config space.
        readConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;
            uint16_t rfaddr;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFGR:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // Reading the ISR acknowledges (clears) all implemented
                // interrupts; the posted/coalesced stats are updated in
                // devIntrClear().
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TX_CFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RX_CFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              // see the spec sheet for how RFCR and RFDR work
              // basically, you write to RFCR to tell the machine
              // what you want to do next, then you act upon RFDR,
              // and the device will be prepared b/c of what you
              // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // The low RFCR bits select which filter word RFDR
                // exposes: the 3 perfect-match MAC words or a filter
                // hash table entry.
                rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
                switch (rfaddr) {
                  // Read from perfect match ROM octets
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    // Read filter hash table
                    if (rfaddr >= FHASH_ADDR &&
                        rfaddr < FHASH_ADDR + FHASH_SIZE) {

                        // Only word-aligned reads supported
                        if (rfaddr % 2)
                            panic("unaligned read from filter hash table!");

                        reg = rom.filterHash[rfaddr - FHASH_ADDR + 1] << 8;
                        reg += rom.filterHash[rfaddr - FHASH_ADDR];
                        break;
                    }

                    panic("reading RFDR for something other than pattern"
                          " matching or hashing! %#x\n", rfaddr);
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // MIBS/ACLR are self-clearing control bits.
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              case M5REG:
                // Simulator-private register exposing config flags to
                // the guest driver.
                reg = params()->m5reg;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
788
789 Fault
790 NSGigE::write(MemReqPtr &req, const uint8_t *data)
791 {
792 assert(ioEnable);
793
794 Addr daddr = req->paddr & 0xfff;
795 DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
796 daddr, req->paddr, req->vaddr, req->size);
797
798 if (daddr > LAST && daddr <= RESERVED) {
799 panic("Accessing reserved register");
800 } else if (daddr > RESERVED && daddr <= 0x3FC) {
801 writeConfig(daddr & 0xff, req->size, data);
802 return No_Fault;
803 } else if (daddr > 0x3FC)
804 panic("Something is messed up!\n");
805
806 if (req->size == sizeof(uint32_t)) {
807 uint32_t reg = *(uint32_t *)data;
808 uint16_t rfaddr;
809
810 DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);
811
812 switch (daddr) {
813 case CR:
814 regs.command = reg;
815 if (reg & CR_TXD) {
816 txEnable = false;
817 } else if (reg & CR_TXE) {
818 txEnable = true;
819
820 // the kernel is enabling the transmit machine
821 if (txState == txIdle)
822 txKick();
823 }
824
825 if (reg & CR_RXD) {
826 rxEnable = false;
827 } else if (reg & CR_RXE) {
828 rxEnable = true;
829
830 if (rxState == rxIdle)
831 rxKick();
832 }
833
834 if (reg & CR_TXR)
835 txReset();
836
837 if (reg & CR_RXR)
838 rxReset();
839
840 if (reg & CR_SWI)
841 devIntrPost(ISR_SWI);
842
843 if (reg & CR_RST) {
844 txReset();
845 rxReset();
846
847 regsReset();
848 }
849 break;
850
851 case CFGR:
852 if (reg & CFGR_LNKSTS ||
853 reg & CFGR_SPDSTS ||
854 reg & CFGR_DUPSTS ||
855 reg & CFGR_RESERVED ||
856 reg & CFGR_T64ADDR ||
857 reg & CFGR_PCI64_DET)
858
859 // First clear all writable bits
860 regs.config &= CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
861 CFGR_RESERVED | CFGR_T64ADDR |
862 CFGR_PCI64_DET;
863 // Now set the appropriate writable bits
864 regs.config |= reg & ~(CFGR_LNKSTS | CFGR_SPDSTS | CFGR_DUPSTS |
865 CFGR_RESERVED | CFGR_T64ADDR |
866 CFGR_PCI64_DET);
867
868 // all these #if 0's are because i don't THINK the kernel needs to
869 // have these implemented. if there is a problem relating to one of
870 // these, you may need to add functionality in.
871 #if 0
872 if (reg & CFGR_TBI_EN) ;
873 if (reg & CFGR_MODE_1000) ;
874 #endif
875
876 if (reg & CFGR_AUTO_1000)
877 panic("CFGR_AUTO_1000 not implemented!\n");
878
879 #if 0
880 if (reg & CFGR_PINT_DUPSTS ||
881 reg & CFGR_PINT_LNKSTS ||
882 reg & CFGR_PINT_SPDSTS)
883 ;
884
885 if (reg & CFGR_TMRTEST) ;
886 if (reg & CFGR_MRM_DIS) ;
887 if (reg & CFGR_MWI_DIS) ;
888
889 if (reg & CFGR_T64ADDR)
890 panic("CFGR_T64ADDR is read only register!\n");
891
892 if (reg & CFGR_PCI64_DET)
893 panic("CFGR_PCI64_DET is read only register!\n");
894
895 if (reg & CFGR_DATA64_EN) ;
896 if (reg & CFGR_M64ADDR) ;
897 if (reg & CFGR_PHY_RST) ;
898 if (reg & CFGR_PHY_DIS) ;
899 #endif
900
901 if (reg & CFGR_EXTSTS_EN)
902 extstsEnable = true;
903 else
904 extstsEnable = false;
905
906 #if 0
907 if (reg & CFGR_REQALG) ;
908 if (reg & CFGR_SB) ;
909 if (reg & CFGR_POW) ;
910 if (reg & CFGR_EXD) ;
911 if (reg & CFGR_PESEL) ;
912 if (reg & CFGR_BROM_DIS) ;
913 if (reg & CFGR_EXT_125) ;
914 if (reg & CFGR_BEM) ;
915 #endif
916 break;
917
918 case MEAR:
919 // Clear writable bits
920 regs.mear &= MEAR_EEDO;
921 // Set appropriate writable bits
922 regs.mear |= reg & ~MEAR_EEDO;
923
924 // FreeBSD uses the EEPROM to read PMATCH (for the MAC address)
925 // even though it could get it through RFDR
926 if (reg & MEAR_EESEL) {
927 // Rising edge of clock
928 if (reg & MEAR_EECLK && !eepromClk)
929 eepromKick();
930 }
931 else {
932 eepromState = eepromStart;
933 regs.mear &= ~MEAR_EEDI;
934 }
935
936 eepromClk = reg & MEAR_EECLK;
937
938 // since phy is completely faked, MEAR_MD* don't matter
939 #if 0
940 if (reg & MEAR_MDIO) ;
941 if (reg & MEAR_MDDIR) ;
942 if (reg & MEAR_MDC) ;
943 #endif
944 break;
945
946 case PTSCR:
947 regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
948 // these control BISTs for various parts of chip - we
949 // don't care or do just fake that the BIST is done
950 if (reg & PTSCR_RBIST_EN)
951 regs.ptscr |= PTSCR_RBIST_DONE;
952 if (reg & PTSCR_EEBIST_EN)
953 regs.ptscr &= ~PTSCR_EEBIST_EN;
954 if (reg & PTSCR_EELOAD_EN)
955 regs.ptscr &= ~PTSCR_EELOAD_EN;
956 break;
957
958 case ISR: /* writing to the ISR has no effect */
959 panic("ISR is a read only register!\n");
960
961 case IMR:
962 regs.imr = reg;
963 devIntrChangeMask();
964 break;
965
966 case IER:
967 regs.ier = reg;
968 break;
969
970 case IHR:
971 regs.ihr = reg;
972 /* not going to implement real interrupt holdoff */
973 break;
974
975 case TXDP:
976 regs.txdp = (reg & 0xFFFFFFFC);
977 assert(txState == txIdle);
978 CTDD = false;
979 break;
980
981 case TXDP_HI:
982 regs.txdp_hi = reg;
983 break;
984
985 case TX_CFG:
986 regs.txcfg = reg;
987 #if 0
988 if (reg & TX_CFG_CSI) ;
989 if (reg & TX_CFG_HBI) ;
990 if (reg & TX_CFG_MLB) ;
991 if (reg & TX_CFG_ATP) ;
992 if (reg & TX_CFG_ECRETRY) {
993 /*
994 * this could easily be implemented, but considering
995 * the network is just a fake pipe, wouldn't make
996 * sense to do this
997 */
998 }
999
1000 if (reg & TX_CFG_BRST_DIS) ;
1001 #endif
1002
1003 #if 0
1004 /* we handle our own DMA, ignore the kernel's exhortations */
1005 if (reg & TX_CFG_MXDMA) ;
1006 #endif
1007
1008 // also, we currently don't care about fill/drain
1009 // thresholds though this may change in the future with
1010 // more realistic networks or a driver which changes it
1011 // according to feedback
1012
1013 break;
1014
1015 case GPIOR:
1016 // Only write writable bits
1017 regs.gpior &= GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1018 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN;
1019 regs.gpior |= reg & ~(GPIOR_UNUSED | GPIOR_GP5_IN | GPIOR_GP4_IN
1020 | GPIOR_GP3_IN | GPIOR_GP2_IN | GPIOR_GP1_IN);
1021 /* these just control general purpose i/o pins, don't matter */
1022 break;
1023
1024 case RXDP:
1025 regs.rxdp = reg;
1026 CRDD = false;
1027 break;
1028
1029 case RXDP_HI:
1030 regs.rxdp_hi = reg;
1031 break;
1032
1033 case RX_CFG:
1034 regs.rxcfg = reg;
1035 #if 0
1036 if (reg & RX_CFG_AEP) ;
1037 if (reg & RX_CFG_ARP) ;
1038 if (reg & RX_CFG_STRIPCRC) ;
1039 if (reg & RX_CFG_RX_RD) ;
1040 if (reg & RX_CFG_ALP) ;
1041 if (reg & RX_CFG_AIRL) ;
1042
1043 /* we handle our own DMA, ignore what kernel says about it */
1044 if (reg & RX_CFG_MXDMA) ;
1045
1046 //also, we currently don't care about fill/drain thresholds
1047 //though this may change in the future with more realistic
1048 //networks or a driver which changes it according to feedback
1049 if (reg & (RX_CFG_DRTH | RX_CFG_DRTH0)) ;
1050 #endif
1051 break;
1052
1053 case PQCR:
1054 /* there is no priority queueing used in the linux 2.6 driver */
1055 regs.pqcr = reg;
1056 break;
1057
1058 case WCSR:
1059 /* not going to implement wake on LAN */
1060 regs.wcsr = reg;
1061 break;
1062
1063 case PCR:
1064 /* not going to implement pause control */
1065 regs.pcr = reg;
1066 break;
1067
1068 case RFCR:
1069 regs.rfcr = reg;
1070
1071 rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
1072 acceptBroadcast = (reg & RFCR_AAB) ? true : false;
1073 acceptMulticast = (reg & RFCR_AAM) ? true : false;
1074 acceptUnicast = (reg & RFCR_AAU) ? true : false;
1075 acceptPerfect = (reg & RFCR_APM) ? true : false;
1076 acceptArp = (reg & RFCR_AARP) ? true : false;
1077 multicastHashEnable = (reg & RFCR_MHEN) ? true : false;
1078
1079 #if 0
1080 if (reg & RFCR_APAT)
1081 panic("RFCR_APAT not implemented!\n");
1082 #endif
1083 if (reg & RFCR_UHEN)
1084 panic("Unicast hash filtering not used by drivers!\n");
1085
1086 if (reg & RFCR_ULM)
1087 panic("RFCR_ULM not implemented!\n");
1088
1089 break;
1090
1091 case RFDR:
1092 rfaddr = (uint16_t)(regs.rfcr & RFCR_RFADDR);
1093 switch (rfaddr) {
1094 case 0x000:
1095 rom.perfectMatch[0] = (uint8_t)reg;
1096 rom.perfectMatch[1] = (uint8_t)(reg >> 8);
1097 break;
1098 case 0x002:
1099 rom.perfectMatch[2] = (uint8_t)reg;
1100 rom.perfectMatch[3] = (uint8_t)(reg >> 8);
1101 break;
1102 case 0x004:
1103 rom.perfectMatch[4] = (uint8_t)reg;
1104 rom.perfectMatch[5] = (uint8_t)(reg >> 8);
1105 break;
1106 default:
1107
1108 if (rfaddr >= FHASH_ADDR &&
1109 rfaddr < FHASH_ADDR + FHASH_SIZE) {
1110
1111 // Only word-aligned writes supported
1112 if (rfaddr % 2)
1113 panic("unaligned write to filter hash table!");
1114
1115 rom.filterHash[rfaddr - FHASH_ADDR] = (uint8_t)reg;
1116 rom.filterHash[rfaddr - FHASH_ADDR + 1]
1117 = (uint8_t)(reg >> 8);
1118 break;
1119 }
1120 panic("writing RFDR for something other than pattern matching\
1121 or hashing! %#x\n", rfaddr);
1122 }
1123
1124 case BRAR:
1125 regs.brar = reg;
1126 break;
1127
1128 case BRDR:
1129 panic("the driver never uses BRDR, something is wrong!\n");
1130
1131 case SRR:
1132 panic("SRR is read only register!\n");
1133
1134 case MIBC:
1135 panic("the driver never uses MIBC, something is wrong!\n");
1136
1137 case VRCR:
1138 regs.vrcr = reg;
1139 break;
1140
1141 case VTCR:
1142 regs.vtcr = reg;
1143 break;
1144
1145 case VDR:
1146 panic("the driver never uses VDR, something is wrong!\n");
1147
1148 case CCSR:
1149 /* not going to implement clockrun stuff */
1150 regs.ccsr = reg;
1151 break;
1152
1153 case TBICR:
1154 regs.tbicr = reg;
1155 if (reg & TBICR_MR_LOOPBACK)
1156 panic("TBICR_MR_LOOPBACK never used, something wrong!\n");
1157
1158 if (reg & TBICR_MR_AN_ENABLE) {
1159 regs.tanlpar = regs.tanar;
1160 regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
1161 }
1162
1163 #if 0
1164 if (reg & TBICR_MR_RESTART_AN) ;
1165 #endif
1166
1167 break;
1168
1169 case TBISR:
1170 panic("TBISR is read only register!\n");
1171
1172 case TANAR:
1173 // Only write the writable bits
1174 regs.tanar &= TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED;
1175 regs.tanar |= reg & ~(TANAR_RF1 | TANAR_RF2 | TANAR_UNUSED);
1176
1177 // Pause capability unimplemented
1178 #if 0
1179 if (reg & TANAR_PS2) ;
1180 if (reg & TANAR_PS1) ;
1181 #endif
1182
1183 break;
1184
1185 case TANLPAR:
1186 panic("this should only be written to by the fake phy!\n");
1187
1188 case TANER:
1189 panic("TANER is read only register!\n");
1190
1191 case TESR:
1192 regs.tesr = reg;
1193 break;
1194
1195 default:
1196 panic("invalid register access daddr=%#x", daddr);
1197 }
1198 } else {
1199 panic("Invalid Request Size");
1200 }
1201
1202 return No_Fault;
1203 }
1204
/**
 * Post one or more device interrupts: set the corresponding ISR bits
 * and, if any pending interrupt is unmasked, schedule delivery to the
 * CPU.  Per-cause "total" counters are bumped only for causes that are
 * unmasked at post time (they feed the coalescing statistics).
 *
 * @param interrupts bitmask of ISR_* causes to raise
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    // Only implemented causes are latched into the ISR.
    interrupts &= ISR_IMPL;
    regs.isr |= interrupts;

    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        // Deliver after the coalescing delay unless one of the
        // no-delay causes is pending, in which case post immediately.
        Tick when = curTick;
        if ((regs.isr & regs.imr & ISR_NODELAY) == 0)
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1255
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing.  just telling you in case you were thinking
   of expanding use.
*/
/**
 * Clear device interrupts (called from read() when the guest reads the
 * ISR).  Before clearing, the "posted" counter for every cause that is
 * pending AND unmasked is bumped -- each such cause counts as one
 * delivered post for the coalescing statistics.  If nothing unmasked
 * remains afterwards, the CPU interrupt line is deasserted.
 *
 * @param interrupts bitmask of ISR_* causes to clear
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    // One delivered post, regardless of how many causes it carried.
    if (regs.isr & regs.imr & ISR_IMPL)
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1305
1306 void
1307 NSGigE::devIntrChangeMask()
1308 {
1309 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1310 regs.isr, regs.imr, regs.isr & regs.imr);
1311
1312 if (regs.isr & regs.imr)
1313 cpuIntrPost(curTick);
1314 else
1315 cpuIntrClear();
1316 }
1317
/**
 * Schedule the CPU-visible interrupt to fire at tick @p when.
 * If an earlier (still-future) interrupt event is already scheduled,
 * the new request coalesces into it and no new event is created;
 * otherwise any outstanding event is squashed and replaced.
 *
 * @param when absolute tick at which the interrupt should be delivered;
 *             must not be in the past.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // Defensive clamp: should be unreachable given the asserts above,
    // hence the debug_break() to catch it under a debugger.
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // Replace any pending event with one at the (earlier) new tick.
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1352
1353 void
1354 NSGigE::cpuInterrupt()
1355 {
1356 assert(intrTick == curTick);
1357
1358 // Whether or not there's a pending interrupt, we don't care about
1359 // it anymore
1360 intrEvent = 0;
1361 intrTick = 0;
1362
1363 // Don't send an interrupt if there's already one
1364 if (cpuPendingIntr) {
1365 DPRINTF(EthernetIntr,
1366 "would send an interrupt now, but there's already pending\n");
1367 } else {
1368 // Send interrupt
1369 cpuPendingIntr = true;
1370
1371 DPRINTF(EthernetIntr, "posting interrupt\n");
1372 intrPost();
1373 }
1374 }
1375
1376 void
1377 NSGigE::cpuIntrClear()
1378 {
1379 if (!cpuPendingIntr)
1380 return;
1381
1382 if (intrEvent) {
1383 intrEvent->squash();
1384 intrEvent = 0;
1385 }
1386
1387 intrTick = 0;
1388
1389 cpuPendingIntr = false;
1390
1391 DPRINTF(EthernetIntr, "clearing interrupt\n");
1392 intrClear();
1393 }
1394
1395 bool
1396 NSGigE::cpuIntrPending() const
1397 { return cpuPendingIntr; }
1398
1399 void
1400 NSGigE::txReset()
1401 {
1402
1403 DPRINTF(Ethernet, "transmit reset\n");
1404
1405 CTDD = false;
1406 txEnable = false;;
1407 txFragPtr = 0;
1408 assert(txDescCnt == 0);
1409 txFifo.clear();
1410 txState = txIdle;
1411 assert(txDmaState == dmaIdle);
1412 }
1413
1414 void
1415 NSGigE::rxReset()
1416 {
1417 DPRINTF(Ethernet, "receive reset\n");
1418
1419 CRDD = false;
1420 assert(rxPktBytes == 0);
1421 rxEnable = false;
1422 rxFragPtr = 0;
1423 assert(rxDescCnt == 0);
1424 assert(rxDmaState == dmaIdle);
1425 rxFifo.clear();
1426 rxState = rxIdle;
1427 }
1428
/**
 * Reset the device register file to its power-on defaults and clear all
 * of the receive-filter acceptance flags. The non-zero values below are
 * the DP83820 reset defaults this model advertises to the driver.
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    // link up, TBI mode enabled, 1000Mbps
    regs.config = (CFGR_LNKSTS | CFGR_TBI_EN | CFGR_MODE_1000);
    regs.mear = 0x12;   // NOTE(review): EEPROM/MII access bits — confirm
                        // against the DP83820 MEAR reset value
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;   // MIB counters frozen at reset
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex
    regs.brar = 0xffffffff;

    // Reset leaves the receive filter accepting nothing.
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1451
1452 void
1453 NSGigE::rxDmaReadCopy()
1454 {
1455 assert(rxDmaState == dmaReading);
1456
1457 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1458 rxDmaState = dmaIdle;
1459
1460 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1461 rxDmaAddr, rxDmaLen);
1462 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1463 }
1464
1465 bool
1466 NSGigE::doRxDmaRead()
1467 {
1468 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1469 rxDmaState = dmaReading;
1470
1471 if (dmaInterface && !rxDmaFree) {
1472 if (dmaInterface->busy())
1473 rxDmaState = dmaReadWaiting;
1474 else
1475 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1476 &rxDmaReadEvent, true);
1477 return true;
1478 }
1479
1480 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1481 rxDmaReadCopy();
1482 return false;
1483 }
1484
1485 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1486 Tick start = curTick + dmaReadDelay + factor;
1487 rxDmaReadEvent.schedule(start);
1488 return true;
1489 }
1490
/**
 * Receive-DMA-read completion callback: finish the copy, then restart
 * both state machines (transmit first if it was blocked on the DMA
 * engine, since this completion may have freed it).
 */
void
NSGigE::rxDmaReadDone()
{
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1503
1504 void
1505 NSGigE::rxDmaWriteCopy()
1506 {
1507 assert(rxDmaState == dmaWriting);
1508
1509 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1510 rxDmaState = dmaIdle;
1511
1512 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1513 rxDmaAddr, rxDmaLen);
1514 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1515 }
1516
1517 bool
1518 NSGigE::doRxDmaWrite()
1519 {
1520 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1521 rxDmaState = dmaWriting;
1522
1523 if (dmaInterface && !rxDmaFree) {
1524 if (dmaInterface->busy())
1525 rxDmaState = dmaWriteWaiting;
1526 else
1527 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1528 &rxDmaWriteEvent, true);
1529 return true;
1530 }
1531
1532 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1533 rxDmaWriteCopy();
1534 return false;
1535 }
1536
1537 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1538 Tick start = curTick + dmaWriteDelay + factor;
1539 rxDmaWriteEvent.schedule(start);
1540 return true;
1541 }
1542
/**
 * Receive-DMA-write completion callback: finish the copy, then restart
 * both state machines (transmit first if it was blocked on the DMA
 * engine, since this completion may have freed it).
 */
void
NSGigE::rxDmaWriteDone()
{
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1555
/**
 * Run the receive state machine. Loops via the "next" label through
 * descriptor reads, packet-data writes and descriptor writebacks until
 * either a DMA is outstanding (exit; the DMA-done event resumes us) or
 * there is no more work. With a non-zero clock, at most one state
 * transition is performed per device cycle via rxKickTick.
 */
void
NSGigE::rxKick()
{
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

  next:
    if (clock) {
        if (rxKickTick > curTick) {
            DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                    rxKickTick);

            goto exit;
        }

        // Go to the next state machine clock tick.
        rxKickTick = curTick + cycles(1);
    }

    // A DMA deferred earlier (engine busy) gets first crack now.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor was already done: just refresh its link
            // field to find the next descriptor.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            // NOTE(review): this pointer arithmetic is scaled by
            // sizeof(ns_desc); it only lands on the link field because
            // offsetof(ns_desc, link) == 0 — confirm before reordering
            // the ns_desc members.
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor at rxdp into the cache.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        // OWN set means the descriptor still belongs to the driver
        // (nothing for the device to fill): go idle.
        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet to be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet,
                                "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
                                tcp->sport(), tcp->dport(), tcp->seq(),
                                tcp->ack());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // don't need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // More packet data to push into the descriptor's buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet copied: write the completed status back.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Verify checksums on behalf of the hardware offload engine
            // and record any errors in the extended status word.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back cmdsts and extsts (adjacent fields) in one DMA.
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment just written.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        // Per-descriptor interrupt requested by the driver.
        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        if (rxDescCache.link == 0) {
            // End of the descriptor ring: remember the current
            // descriptor is done (CRDD) and go idle.
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);

    if (clock && !rxKickEvent.scheduled())
        rxKickEvent.schedule(rxKickTick);
}
1875
1876 void
1877 NSGigE::transmit()
1878 {
1879 if (txFifo.empty()) {
1880 DPRINTF(Ethernet, "nothing to transmit\n");
1881 return;
1882 }
1883
1884 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1885 txFifo.size());
1886 if (interface->sendPacket(txFifo.front())) {
1887 #if TRACING_ON
1888 if (DTRACE(Ethernet)) {
1889 IpPtr ip(txFifo.front());
1890 if (ip) {
1891 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1892 TcpPtr tcp(ip);
1893 if (tcp) {
1894 DPRINTF(Ethernet,
1895 "Src Port=%d, Dest Port=%d, Seq=%d, Ack=%d\n",
1896 tcp->sport(), tcp->dport(), tcp->seq(), tcp->ack());
1897 }
1898 }
1899 }
1900 #endif
1901
1902 DDUMP(EthernetData, txFifo.front()->data, txFifo.front()->length);
1903 txBytes += txFifo.front()->length;
1904 txPackets++;
1905
1906 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1907 txFifo.avail());
1908 txFifo.pop();
1909
1910 /*
1911 * normally do a writeback of the descriptor here, and ONLY
1912 * after that is done, send this interrupt. but since our
1913 * stuff never actually fails, just do this interrupt here,
1914 * otherwise the code has to stray from this nice format.
1915 * besides, it's functionally the same.
1916 */
1917 devIntrPost(ISR_TXOK);
1918 }
1919
1920 if (!txFifo.empty() && !txEvent.scheduled()) {
1921 DPRINTF(Ethernet, "reschedule transmit\n");
1922 txEvent.schedule(curTick + retryTime);
1923 }
1924 }
1925
1926 void
1927 NSGigE::txDmaReadCopy()
1928 {
1929 assert(txDmaState == dmaReading);
1930
1931 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1932 txDmaState = dmaIdle;
1933
1934 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1935 txDmaAddr, txDmaLen);
1936 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1937 }
1938
1939 bool
1940 NSGigE::doTxDmaRead()
1941 {
1942 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1943 txDmaState = dmaReading;
1944
1945 if (dmaInterface && !txDmaFree) {
1946 if (dmaInterface->busy())
1947 txDmaState = dmaReadWaiting;
1948 else
1949 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1950 &txDmaReadEvent, true);
1951 return true;
1952 }
1953
1954 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1955 txDmaReadCopy();
1956 return false;
1957 }
1958
1959 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1960 Tick start = curTick + dmaReadDelay + factor;
1961 txDmaReadEvent.schedule(start);
1962 return true;
1963 }
1964
/**
 * Transmit-DMA-read completion callback: finish the copy, then restart
 * both state machines (receive first if it was blocked on the DMA
 * engine, since this completion may have freed it).
 */
void
NSGigE::txDmaReadDone()
{
    assert(txDmaState == dmaReading);
    txDmaReadCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
1977
1978 void
1979 NSGigE::txDmaWriteCopy()
1980 {
1981 assert(txDmaState == dmaWriting);
1982
1983 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1984 txDmaState = dmaIdle;
1985
1986 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1987 txDmaAddr, txDmaLen);
1988 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1989 }
1990
1991 bool
1992 NSGigE::doTxDmaWrite()
1993 {
1994 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1995 txDmaState = dmaWriting;
1996
1997 if (dmaInterface && !txDmaFree) {
1998 if (dmaInterface->busy())
1999 txDmaState = dmaWriteWaiting;
2000 else
2001 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
2002 &txDmaWriteEvent, true);
2003 return true;
2004 }
2005
2006 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
2007 txDmaWriteCopy();
2008 return false;
2009 }
2010
2011 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
2012 Tick start = curTick + dmaWriteDelay + factor;
2013 txDmaWriteEvent.schedule(start);
2014 return true;
2015 }
2016
/**
 * Transmit-DMA-write completion callback: finish the copy, then restart
 * both state machines (receive first if it was blocked on the DMA
 * engine, since this completion may have freed it).
 */
void
NSGigE::txDmaWriteDone()
{
    assert(txDmaState == dmaWriting);
    txDmaWriteCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
2029
/**
 * Run the transmit state machine. Loops via the "next" label through
 * descriptor reads, packet-fragment reads and descriptor writebacks
 * until either a DMA is outstanding (exit; the DMA-done event resumes
 * us) or there is no more work. With a non-zero clock, at most one
 * state transition is performed per device cycle via txKickTick.
 */
void
NSGigE::txKick()
{
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

  next:
    if (clock) {
        if (txKickTick > curTick) {
            DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                    txKickTick);
            goto exit;
        }

        // Go to the next state machine clock tick.
        txKickTick = curTick + cycles(1);
    }

    // A DMA deferred earlier (engine busy) gets first crack now.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor was already done: just refresh its link
            // field to find the next descriptor.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            // NOTE(review): this pointer arithmetic is scaled by
            // sizeof(ns_desc); it only lands on the link field because
            // offsetof(ns_desc, link) == 0 — confirm before reordering
            // the ns_desc members.
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the whole descriptor at txdp into the cache.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc, "txDescCache: addr=%08x read descriptor\n",
                regs.txdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        // OWN set means the driver handed this descriptor to the device.
        if (txDescCache.cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new PacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            // Descriptor exhausted: either more descriptors follow for
            // this packet (MORE) or the packet is complete.
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the packet that just finished */
                // Perform the requested checksum offloads before queueing.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not to spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back cmdsts and extsts (adjacent) in one DMA.
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: try to drain it onto the wire, then wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // Advance past the fragment just read and claim fifo space.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        // Per-descriptor interrupt requested by the driver.
        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        if (!txEnable) {
            DPRINTF(EthernetSM, "halting TX state machine\n");
            txState = txIdle;
            goto exit;
        } else
            txState = txAdvance;
        break;

      case txAdvance:
        if (txDescCache.link == 0) {
            // End of the descriptor ring: go idle.
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);
    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);

    if (clock && !txKickEvent.scheduled())
        txKickEvent.schedule(txKickTick);
}
2321
2322 /**
2323 * Advance the EEPROM state machine
2324 * Called on rising edge of EEPROM clock bit in MEAR
2325 */
void
NSGigE::eepromKick()
{
    switch (eepromState) {

      case eepromStart:

        // Wait for start bit
        if (regs.mear & MEAR_EEDI) {
            // Set up to get 2 opcode bits
            eepromState = eepromGetOpcode;
            eepromBitsToRx = 2;
            eepromOpcode = 0;
        }
        break;

      case eepromGetOpcode:
        // Shift in one opcode bit per clock edge, MSB first.
        eepromOpcode <<= 1;
        eepromOpcode += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting opcode
        if (eepromBitsToRx == 0) {
            if (eepromOpcode != EEPROM_READ)
                panic("only EEPROM reads are implemented!");

            // Set up to get address (6 bits, MSB first)
            eepromState = eepromGetAddress;
            eepromBitsToRx = 6;
            eepromAddress = 0;
        }
        break;

      case eepromGetAddress:
        // Shift in one address bit per clock edge, MSB first.
        eepromAddress <<= 1;
        eepromAddress += (regs.mear & MEAR_EEDI) ? 1 : 0;
        --eepromBitsToRx;

        // Done getting address
        if (eepromBitsToRx == 0) {

            if (eepromAddress >= EEPROM_SIZE)
                panic("EEPROM read access out of range!");

            // Only the three perfect-match-address words are modelled;
            // each EEPROM word packs two bytes of the station address.
            switch (eepromAddress) {

              case EEPROM_PMATCH2_ADDR:
                eepromData = rom.perfectMatch[5];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[4];
                break;

              case EEPROM_PMATCH1_ADDR:
                eepromData = rom.perfectMatch[3];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[2];
                break;

              case EEPROM_PMATCH0_ADDR:
                eepromData = rom.perfectMatch[1];
                eepromData <<= 8;
                eepromData += rom.perfectMatch[0];
                break;

              default:
                panic("FreeBSD driver only uses EEPROM to read PMATCH!");
            }
            // Set up to read data (16 bits, MSB first)
            eepromState = eepromRead;
            eepromBitsToRx = 16;

            // Clear data in bit
            regs.mear &= ~MEAR_EEDI;
        }
        break;

      case eepromRead:
        // Clear Data Out bit
        regs.mear &= ~MEAR_EEDO;
        // Set bit to value of current EEPROM bit
        regs.mear |= (eepromData & 0x8000) ? MEAR_EEDO : 0x0;

        eepromData <<= 1;
        --eepromBitsToRx;

        // All done
        if (eepromBitsToRx == 0) {
            eepromState = eepromStart;
        }
        break;

      default:
        panic("invalid EEPROM state");
    }

}
2422
2423 void
2424 NSGigE::transferDone()
2425 {
2426 if (txFifo.empty()) {
2427 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2428 return;
2429 }
2430
2431 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2432
2433 if (txEvent.scheduled())
2434 txEvent.reschedule(curTick + cycles(1));
2435 else
2436 txEvent.schedule(curTick + cycles(1));
2437 }
2438
2439 bool
2440 NSGigE::rxFilter(const PacketPtr &packet)
2441 {
2442 EthPtr eth = packet;
2443 bool drop = true;
2444 string type;
2445
2446 const EthAddr &dst = eth->dst();
2447 if (dst.unicast()) {
2448 // If we're accepting all unicast addresses
2449 if (acceptUnicast)
2450 drop = false;
2451
2452 // If we make a perfect match
2453 if (acceptPerfect && dst == rom.perfectMatch)
2454 drop = false;
2455
2456 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2457 drop = false;
2458
2459 } else if (dst.broadcast()) {
2460 // if we're accepting broadcasts
2461 if (acceptBroadcast)
2462 drop = false;
2463
2464 } else if (dst.multicast()) {
2465 // if we're accepting all multicasts
2466 if (acceptMulticast)
2467 drop = false;
2468
2469 // Multicast hashing faked - all packets accepted
2470 if (multicastHashEnable)
2471 drop = false;
2472 }
2473
2474 if (drop) {
2475 DPRINTF(Ethernet, "rxFilter drop\n");
2476 DDUMP(EthernetData, packet->data, packet->length);
2477 }
2478
2479 return drop;
2480 }
2481
/**
 * Accept a packet arriving from the ethernet link.
 *
 * @param packet the incoming packet
 * @return true if the packet was consumed (including silently dropped
 *         cases); false only when the receive fifo overflows, which
 *         also posts ISR_RXORN — presumably signalling the link to
 *         retry later (confirm against the EtherInt contract).
 */
bool
NSGigE::recvPacket(PacketPtr packet)
{
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        interface->recvDone();
        return true;
    }

    // NOTE(review): dropping when filtering is *disabled* looks
    // inverted, but the drivers modelled always enable filtering —
    // confirm before relying on this path.
    if (!rxFilterEnable) {
        DPRINTF(Ethernet,
            "receive packet filtering disabled . . . packet dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    // Fifo overflow: count it, raise the RX overrun interrupt, and
    // report failure to the link.
    if (rxFifo.avail() < packet->length) {
#if TRACING_ON
        IpPtr ip(packet);
        TcpPtr tcp(ip);
        if (ip) {
            DPRINTF(Ethernet,
                    "packet won't fit in receive buffer...pkt ID %d dropped\n",
                    ip->id());
            if (tcp) {
                DPRINTF(Ethernet, "Seq=%d\n", tcp->seq());
            }
        }
#endif
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    // Wake the receive state machine to start consuming the packet.
    rxKick();
    return true;
}
2534
2535 //=====================================================================
2536 //
2537 //
/**
 * Save the complete device state to a checkpoint stream.
 *
 * NOTE: this is not entirely side-effect free.  Any in-flight DMA
 * events are completed immediately (below) and txPacket->length is
 * recomputed, so the device state after serialization may differ
 * slightly from the state before it.  The field order here is a hard
 * contract with unserialize() and must not be changed.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.  Completing the copies here means
     * no DMA events need to be checkpointed; unserialize() simply
     * restores the resulting register/FIFO state.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers (DP83820 register file)
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.brar);
    SERIALIZE_SCALAR(regs.brdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    // EEPROM contents: perfect-match MAC address and multicast hash filter
    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
    SERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables.  Packet buffer pointers
     * are stored as offsets from the packet data so they can be
     * rebuilt against the newly allocated buffers on restore.
     */
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        // length is derived from how far the buffer pointer has advanced
        txPacket->length = txPacketBufPtr - txPacket->data;
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches (cached copies of the current tx/rx
     * descriptors)
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);
    SERIALIZE_SCALAR(extstsEnable);

    /*
     * Serialize tx state machine.  Enum-typed state is widened to a
     * plain int so the SERIALIZE_SCALAR macro can handle it.
     */
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);
    SERIALIZE_SCALAR(txKickTick);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);
    SERIALIZE_SCALAR(rxKickTick);

    /*
     * Serialize EEPROM state machine
     */
    int eepromState = this->eepromState;
    SERIALIZE_SCALAR(eepromState);
    SERIALIZE_SCALAR(eepromClk);
    SERIALIZE_SCALAR(eepromBitsToRx);
    SERIALIZE_SCALAR(eepromOpcode);
    SERIALIZE_SCALAR(eepromAddress);
    SERIALIZE_SCALAR(eepromData);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later.  Stored relative to curTick; 0 means no
     * transmit pending.
     */
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);
    SERIALIZE_SCALAR(multicastHashEnable);

    /*
     * Keep track of pending interrupt status.  The interrupt event's
     * absolute tick is stored (0 if none scheduled).
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2708
2709 void
2710 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2711 {
2712 // Unserialize the PciDev base class
2713 PciDev::unserialize(cp, section);
2714
2715 UNSERIALIZE_SCALAR(regs.command);
2716 UNSERIALIZE_SCALAR(regs.config);
2717 UNSERIALIZE_SCALAR(regs.mear);
2718 UNSERIALIZE_SCALAR(regs.ptscr);
2719 UNSERIALIZE_SCALAR(regs.isr);
2720 UNSERIALIZE_SCALAR(regs.imr);
2721 UNSERIALIZE_SCALAR(regs.ier);
2722 UNSERIALIZE_SCALAR(regs.ihr);
2723 UNSERIALIZE_SCALAR(regs.txdp);
2724 UNSERIALIZE_SCALAR(regs.txdp_hi);
2725 UNSERIALIZE_SCALAR(regs.txcfg);
2726 UNSERIALIZE_SCALAR(regs.gpior);
2727 UNSERIALIZE_SCALAR(regs.rxdp);
2728 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2729 UNSERIALIZE_SCALAR(regs.rxcfg);
2730 UNSERIALIZE_SCALAR(regs.pqcr);
2731 UNSERIALIZE_SCALAR(regs.wcsr);
2732 UNSERIALIZE_SCALAR(regs.pcr);
2733 UNSERIALIZE_SCALAR(regs.rfcr);
2734 UNSERIALIZE_SCALAR(regs.rfdr);
2735 UNSERIALIZE_SCALAR(regs.brar);
2736 UNSERIALIZE_SCALAR(regs.brdr);
2737 UNSERIALIZE_SCALAR(regs.srr);
2738 UNSERIALIZE_SCALAR(regs.mibc);
2739 UNSERIALIZE_SCALAR(regs.vrcr);
2740 UNSERIALIZE_SCALAR(regs.vtcr);
2741 UNSERIALIZE_SCALAR(regs.vdr);
2742 UNSERIALIZE_SCALAR(regs.ccsr);
2743 UNSERIALIZE_SCALAR(regs.tbicr);
2744 UNSERIALIZE_SCALAR(regs.tbisr);
2745 UNSERIALIZE_SCALAR(regs.tanar);
2746 UNSERIALIZE_SCALAR(regs.tanlpar);
2747 UNSERIALIZE_SCALAR(regs.taner);
2748 UNSERIALIZE_SCALAR(regs.tesr);
2749
2750 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2751 UNSERIALIZE_ARRAY(rom.filterHash, FHASH_SIZE);
2752
2753 UNSERIALIZE_SCALAR(ioEnable);
2754
2755 /*
2756 * unserialize the data fifos
2757 */
2758 rxFifo.unserialize("rxFifo", cp, section);
2759 txFifo.unserialize("txFifo", cp, section);
2760
2761 /*
2762 * unserialize the various helper variables
2763 */
2764 bool txPacketExists;
2765 UNSERIALIZE_SCALAR(txPacketExists);
2766 if (txPacketExists) {
2767 txPacket = new PacketData(16384);
2768 txPacket->unserialize("txPacket", cp, section);
2769 uint32_t txPktBufPtr;
2770 UNSERIALIZE_SCALAR(txPktBufPtr);
2771 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2772 } else
2773 txPacket = 0;
2774
2775 bool rxPacketExists;
2776 UNSERIALIZE_SCALAR(rxPacketExists);
2777 rxPacket = 0;
2778 if (rxPacketExists) {
2779 rxPacket = new PacketData(16384);
2780 rxPacket->unserialize("rxPacket", cp, section);
2781 uint32_t rxPktBufPtr;
2782 UNSERIALIZE_SCALAR(rxPktBufPtr);
2783 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2784 } else
2785 rxPacket = 0;
2786
2787 UNSERIALIZE_SCALAR(txXferLen);
2788 UNSERIALIZE_SCALAR(rxXferLen);
2789
2790 /*
2791 * Unserialize DescCaches
2792 */
2793 UNSERIALIZE_SCALAR(txDescCache.link);
2794 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2795 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2796 UNSERIALIZE_SCALAR(txDescCache.extsts);
2797 UNSERIALIZE_SCALAR(rxDescCache.link);
2798 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2799 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2800 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2801 UNSERIALIZE_SCALAR(extstsEnable);
2802
2803 /*
2804 * unserialize tx state machine
2805 */
2806 int txState;
2807 UNSERIALIZE_SCALAR(txState);
2808 this->txState = (TxState) txState;
2809 UNSERIALIZE_SCALAR(txEnable);
2810 UNSERIALIZE_SCALAR(CTDD);
2811 UNSERIALIZE_SCALAR(txFragPtr);
2812 UNSERIALIZE_SCALAR(txDescCnt);
2813 int txDmaState;
2814 UNSERIALIZE_SCALAR(txDmaState);
2815 this->txDmaState = (DmaState) txDmaState;
2816 UNSERIALIZE_SCALAR(txKickTick);
2817 if (txKickTick)
2818 txKickEvent.schedule(txKickTick);
2819
2820 /*
2821 * unserialize rx state machine
2822 */
2823 int rxState;
2824 UNSERIALIZE_SCALAR(rxState);
2825 this->rxState = (RxState) rxState;
2826 UNSERIALIZE_SCALAR(rxEnable);
2827 UNSERIALIZE_SCALAR(CRDD);
2828 UNSERIALIZE_SCALAR(rxPktBytes);
2829 UNSERIALIZE_SCALAR(rxFragPtr);
2830 UNSERIALIZE_SCALAR(rxDescCnt);
2831 int rxDmaState;
2832 UNSERIALIZE_SCALAR(rxDmaState);
2833 this->rxDmaState = (DmaState) rxDmaState;
2834 UNSERIALIZE_SCALAR(rxKickTick);
2835 if (rxKickTick)
2836 rxKickEvent.schedule(rxKickTick);
2837
2838 /*
2839 * Unserialize EEPROM state machine
2840 */
2841 int eepromState;
2842 UNSERIALIZE_SCALAR(eepromState);
2843 this->eepromState = (EEPROMState) eepromState;
2844 UNSERIALIZE_SCALAR(eepromClk);
2845 UNSERIALIZE_SCALAR(eepromBitsToRx);
2846 UNSERIALIZE_SCALAR(eepromOpcode);
2847 UNSERIALIZE_SCALAR(eepromAddress);
2848 UNSERIALIZE_SCALAR(eepromData);
2849
2850 /*
2851 * If there's a pending transmit, reschedule it now
2852 */
2853 Tick transmitTick;
2854 UNSERIALIZE_SCALAR(transmitTick);
2855 if (transmitTick)
2856 txEvent.schedule(curTick + transmitTick);
2857
2858 /*
2859 * unserialize receive address filter settings
2860 */
2861 UNSERIALIZE_SCALAR(rxFilterEnable);
2862 UNSERIALIZE_SCALAR(acceptBroadcast);
2863 UNSERIALIZE_SCALAR(acceptMulticast);
2864 UNSERIALIZE_SCALAR(acceptUnicast);
2865 UNSERIALIZE_SCALAR(acceptPerfect);
2866 UNSERIALIZE_SCALAR(acceptArp);
2867 UNSERIALIZE_SCALAR(multicastHashEnable);
2868
2869 /*
2870 * Keep track of pending interrupt status.
2871 */
2872 UNSERIALIZE_SCALAR(intrTick);
2873 UNSERIALIZE_SCALAR(cpuPendingIntr);
2874 Tick intrEventTick;
2875 UNSERIALIZE_SCALAR(intrEventTick);
2876 if (intrEventTick) {
2877 intrEvent = new IntrEvent(this, true);
2878 intrEvent->schedule(intrEventTick);
2879 }
2880
2881 /*
2882 * re-add addrRanges to bus bridges
2883 */
2884 if (pioInterface) {
2885 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
2886 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
2887 }
2888 }
2889
2890 Tick
2891 NSGigE::cacheAccess(MemReqPtr &req)
2892 {
2893 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2894 req->paddr, req->paddr - addr);
2895 return curTick + pioLatency;
2896 }
2897
// Parameter declarations for the NSGigEInt (ethernet interface) SimObject.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    // The EtherInt on the other end of the link (may be NULL).
    SimObjectParam<EtherInt *> peer;
    // The NSGigE device this interface belongs to.
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2904
// Parameter initialization (names, help strings, defaults) for NSGigEInt.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2911
2912 CREATE_SIM_OBJECT(NSGigEInt)
2913 {
2914 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2915
2916 EtherInt *p = (EtherInt *)peer;
2917 if (p) {
2918 dev_int->setPeer(p);
2919 p->setPeer(dev_int);
2920 }
2921
2922 return dev_int;
2923 }
2924
2925 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2926
2927
// Parameter declarations for the NSGigE device SimObject.
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Device address and state-machine clock.
    Param<Addr> addr;
    Param<Tick> clock;
    // Tx/rx/interrupt delays.
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;
    // Memory system attachment.
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;
    // Receive filtering and MAC address.
    Param<bool> rx_filter;
    Param<string> hardware_address;
    // Buses for header and payload traffic.
    SimObjectParam<Bus*> io_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;
    // DMA timing model knobs.
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;
    // PCI configuration/placement.
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;
    // FIFO sizes and misc.
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;
    Param<uint32_t> m5reg;
    Param<bool> dma_no_allocate;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2961
// Parameter initialization (names, help strings, defaults) for NSGigE.
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM(clock, "State machine processor frequency"),
    INIT_PARAM(tx_delay, "Transmit Delay"),
    INIT_PARAM(rx_delay, "Receive Delay"),
    INIT_PARAM(intr_delay, "Interrupt Delay in microseconds"),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM(hardware_address, "Ethernet Hardware Address"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072),
    INIT_PARAM(m5reg, "m5 register"),
    INIT_PARAM_DFLT(dma_no_allocate, "Should DMA reads allocate cache lines", true)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2995
2996
2997 CREATE_SIM_OBJECT(NSGigE)
2998 {
2999 NSGigE::Params *params = new NSGigE::Params;
3000
3001 params->name = getInstanceName();
3002 params->mmu = mmu;
3003 params->configSpace = configspace;
3004 params->configData = configdata;
3005 params->plat = platform;
3006 params->busNum = pci_bus;
3007 params->deviceNum = pci_dev;
3008 params->functionNum = pci_func;
3009
3010 params->clock = clock;
3011 params->intr_delay = intr_delay;
3012 params->pmem = physmem;
3013 params->tx_delay = tx_delay;
3014 params->rx_delay = rx_delay;
3015 params->hier = hier;
3016 params->header_bus = io_bus;
3017 params->payload_bus = payload_bus;
3018 params->pio_latency = pio_latency;
3019 params->dma_desc_free = dma_desc_free;
3020 params->dma_data_free = dma_data_free;
3021 params->dma_read_delay = dma_read_delay;
3022 params->dma_write_delay = dma_write_delay;
3023 params->dma_read_factor = dma_read_factor;
3024 params->dma_write_factor = dma_write_factor;
3025 params->rx_filter = rx_filter;
3026 params->eaddr = hardware_address;
3027 params->tx_fifo_size = tx_fifo_size;
3028 params->rx_fifo_size = rx_fifo_size;
3029 params->m5reg = m5reg;
3030 params->dma_no_allocate = dma_no_allocate;
3031 return new NSGigE(params);
3032 }
3033
3034 REGISTER_SIM_OBJECT("NSGigE", NSGigE)