Merge zizzer:/z/m5/Bitkeeper/m5
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 /* @file
30 * Device module for modelling the National Semiconductor
31 * DP83820 ethernet controller. Does not support priority queueing
32 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "dev/dma.hh"
40 #include "dev/etherlink.hh"
41 #include "dev/ns_gige.hh"
42 #include "dev/pciconfigall.hh"
43 #include "mem/bus/bus.hh"
44 #include "mem/bus/dma_interface.hh"
45 #include "mem/bus/pio_interface.hh"
46 #include "mem/bus/pio_interface_impl.hh"
47 #include "mem/functional_mem/memory_control.hh"
48 #include "mem/functional_mem/physical_memory.hh"
49 #include "sim/builder.hh"
50 #include "sim/debug.hh"
51 #include "sim/host.hh"
52 #include "sim/stats.hh"
53 #include "targetarch/vtophys.hh"
54
/** Printable names for the receive state machine states (indexed by RxState). */
const char *NsRxStateStrings[] = {
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance",
};
65
/** Printable names for the transmit state machine states (indexed by TxState). */
const char *NsTxStateStrings[] = {
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance",
};
76
/** Printable names for the DMA engine states (indexed by DmaState). */
const char *NsDmaState[] = {
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting",
};
85
86 using namespace std;
87 using namespace Net;
88
89 ///////////////////////////////////////////////////////////////////////
90 //
91 // NSGigE PCI Device
92 //
/**
 * Construct the device model: initialize all state-machine and DMA
 * bookkeeping to idle/disabled, then wire the device to the memory
 * system buses described in the parameter struct.
 *
 * Bus wiring rules (from the code below):
 *  - header_bus present: PIO goes on the header bus; DMA uses
 *    header+payload buses if a payload bus exists, else header twice.
 *  - only payload_bus present: both PIO and DMA use the payload bus.
 *  - neither present: no bus interfaces are created (pioInterface /
 *    dmaInterface are left as constructed elsewhere).
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    if (p->header_bus) {
        // PIO (register) accesses arrive over the header bus.
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRatio;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1);
    } else if (p->payload_bus) {
        // No header bus: everything goes over the payload bus.
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1);
    }


    // Timing knobs for interrupt posting and modeled DMA latency.
    intrDelay = US2Ticks(p->intr_delay);
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // Put the register file in its power-on state and load the
    // station MAC address into the perfect-match filter ROM.
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}
150
// Destructor: no explicit teardown; members release their own resources.
NSGigE::~NSGigE()
{}
153
/**
 * Register all simulator statistics for this device and define the
 * derived formulas (bandwidth, packet rates, interrupt-coalescing
 * averages).  Called once by the stats framework before simulation.
 */
void
NSGigE::regStats()
{
    // --- Raw byte/packet counters -------------------------------------
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    // --- Checksum-offload counters ------------------------------------
    txIpChecksums
        .name(name() + ".txIpChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIpChecksums
        .name(name() + ".rxIpChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txTcpChecksums
        .name(name() + ".txTcpChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTcpChecksums
        .name(name() + ".rxTcpChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txUdpChecksums
        .name(name() + ".txUdpChecksums")
        .desc("Number of tx UDP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxUdpChecksums
        .name(name() + ".rxUdpChecksums")
        .desc("Number of rx UDP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    // --- Descriptor DMA traffic ---------------------------------------
    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0)
        ;


    // --- Derived rates (formulas defined at the bottom) ---------------
    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    // --- Per-interrupt-type coalescing stats --------------------------
    // For each type X: totalX counts writes into the ISR, postedX counts
    // how many actually reached the CPU, coalescedX is their ratio.
    postedSwi
        .name(name() + ".postedSwi")
        .desc("number of software interrupts posted to CPU")
        .precision(0)
        ;

    totalSwi
        .name(name() + ".totalSwi")
        .desc("number of total Swi written to ISR")
        .precision(0)
        ;

    coalescedSwi
        .name(name() + ".coalescedSwi")
        .desc("average number of Swi's coalesced into each post")
        .precision(0)
        ;

    postedRxIdle
        .name(name() + ".postedRxIdle")
        .desc("number of rxIdle interrupts posted to CPU")
        .precision(0)
        ;

    totalRxIdle
        .name(name() + ".totalRxIdle")
        .desc("number of total RxIdle written to ISR")
        .precision(0)
        ;

    coalescedRxIdle
        .name(name() + ".coalescedRxIdle")
        .desc("average number of RxIdle's coalesced into each post")
        .precision(0)
        ;

    postedRxOk
        .name(name() + ".postedRxOk")
        .desc("number of RxOk interrupts posted to CPU")
        .precision(0)
        ;

    totalRxOk
        .name(name() + ".totalRxOk")
        .desc("number of total RxOk written to ISR")
        .precision(0)
        ;

    coalescedRxOk
        .name(name() + ".coalescedRxOk")
        .desc("average number of RxOk's coalesced into each post")
        .precision(0)
        ;

    postedRxDesc
        .name(name() + ".postedRxDesc")
        .desc("number of RxDesc interrupts posted to CPU")
        .precision(0)
        ;

    totalRxDesc
        .name(name() + ".totalRxDesc")
        .desc("number of total RxDesc written to ISR")
        .precision(0)
        ;

    coalescedRxDesc
        .name(name() + ".coalescedRxDesc")
        .desc("average number of RxDesc's coalesced into each post")
        .precision(0)
        ;

    postedTxOk
        .name(name() + ".postedTxOk")
        .desc("number of TxOk interrupts posted to CPU")
        .precision(0)
        ;

    totalTxOk
        .name(name() + ".totalTxOk")
        .desc("number of total TxOk written to ISR")
        .precision(0)
        ;

    coalescedTxOk
        .name(name() + ".coalescedTxOk")
        .desc("average number of TxOk's coalesced into each post")
        .precision(0)
        ;

    postedTxIdle
        .name(name() + ".postedTxIdle")
        .desc("number of TxIdle interrupts posted to CPU")
        .precision(0)
        ;

    totalTxIdle
        .name(name() + ".totalTxIdle")
        .desc("number of total TxIdle written to ISR")
        .precision(0)
        ;

    coalescedTxIdle
        .name(name() + ".coalescedTxIdle")
        .desc("average number of TxIdle's coalesced into each post")
        .precision(0)
        ;

    postedTxDesc
        .name(name() + ".postedTxDesc")
        .desc("number of TxDesc interrupts posted to CPU")
        .precision(0)
        ;

    totalTxDesc
        .name(name() + ".totalTxDesc")
        .desc("number of total TxDesc written to ISR")
        .precision(0)
        ;

    coalescedTxDesc
        .name(name() + ".coalescedTxDesc")
        .desc("average number of TxDesc's coalesced into each post")
        .precision(0)
        ;

    postedRxOrn
        .name(name() + ".postedRxOrn")
        .desc("number of RxOrn posted to CPU")
        .precision(0)
        ;

    totalRxOrn
        .name(name() + ".totalRxOrn")
        .desc("number of total RxOrn written to ISR")
        .precision(0)
        ;

    coalescedRxOrn
        .name(name() + ".coalescedRxOrn")
        .desc("average number of RxOrn's coalesced into each post")
        .precision(0)
        ;

    coalescedTotal
        .name(name() + ".coalescedTotal")
        .desc("average number of interrupts coalesced into each post")
        .precision(0)
        ;

    postedInterrupts
        .name(name() + ".postedInterrupts")
        .desc("number of posts to CPU")
        .precision(0)
        ;

    droppedPackets
        .name(name() + ".droppedPackets")
        .desc("number of packets dropped")
        .precision(0)
        ;

    // --- Formula definitions ------------------------------------------
    coalescedSwi = totalSwi / postedInterrupts;
    coalescedRxIdle = totalRxIdle / postedInterrupts;
    coalescedRxOk = totalRxOk / postedInterrupts;
    coalescedRxDesc = totalRxDesc / postedInterrupts;
    coalescedTxOk = totalTxOk / postedInterrupts;
    coalescedTxIdle = totalTxIdle / postedInterrupts;
    coalescedTxDesc = totalTxDesc / postedInterrupts;
    coalescedRxOrn = totalRxOrn / postedInterrupts;

    coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + totalTxOk
                      + totalTxIdle + totalTxDesc + totalRxOrn) / postedInterrupts;

    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}
455
456 /**
457 * This is to read the PCI general configuration registers
458 */
459 void
460 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
461 {
462 if (offset < PCI_DEVICE_SPECIFIC)
463 PciDev::ReadConfig(offset, size, data);
464 else
465 panic("Device specific PCI config space not implemented!\n");
466 }
467
/**
 * Write a PCI general configuration register, then react to side
 * effects of particular registers: the COMMAND register toggles PIO
 * decoding, and BAR writes register the new address range with the
 * PIO interface.  Device-specific config space is not implemented.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // Track the I/O Space Enable bit; read()/write() assert on it.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            // strip the uncached-space bits to get a physical address
            // (Alpha/EV5-specific address mangling)
            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
525
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.  Only 32-bit accesses are supported; reads of reserved
 * space or with other sizes panic.  Some registers have read side
 * effects (CR clears its command bits, ISR is cleared on read).
 *
 * @param req  memory request; paddr selects the register, size must
 *             be sizeof(uint32_t) for the register file proper
 * @param data destination buffer for the register value
 * @return No_Fault (illegal accesses panic instead of faulting)
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI configuration space
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's. hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

                // see the spec sheet for how RFCR and RFDR work
                // basically, you write to RFCR to tell the machine
                // what you want to do next, then you act upon RFDR,
                // and the device will be prepared b/c of what you
                // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which pair of perfect-match
                // (MAC address) bytes is returned, little-endian
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // the "stats frozen" / "clear" bits read back as zero
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
735
/**
 * Write a device register (see the NS83820 spec sheet).  Only 32-bit
 * accesses are supported.  Several registers have side effects:
 * CR kicks/resets the tx/rx state machines, CFG updates checksum
 * offload (EXTSTS) handling, IMR re-evaluates the interrupt line,
 * and RFCR reprograms the receive filter.  Writes to read-only or
 * driver-unused registers panic so unexpected kernel behavior is
 * caught early.
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // this window aliases the PCI configuration space
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            // NOTE(review): |= means previously-set writable CFG bits
            // are never cleared by a later write — confirm against the
            // DP83820 spec if a driver ever depends on clearing them
            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

// all these #if 0's are because i don't THINK the kernel needs to
// have these implemented. if there is a problem relating to one of
// these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // extended status (checksum offload info) enable flag,
            // consulted by the descriptor processing code
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            regs.imr = reg;
            // mask change may raise or drop the CPU interrupt line
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword-aligned; low bits ignored
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            regs.rfcr = reg;

            // update the receive-filter mode flags used by rxFilter()
            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            if (reg & TBICR_MR_AN_ENABLE) {
                // fake instant autonegotiation completion: partner
                // advertises what we advertise, link comes up
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
1094
/**
 * Set bits in the device ISR and, if any newly-set bit is unmasked,
 * schedule an interrupt to the CPU (delayed by intrDelay unless an
 * ISR_NODELAY interrupt is pending).  Also bumps the per-type
 * "total" coalescing statistics for unmasked interrupts.
 *
 * @param interrupts ISR bits to assert (ISR_RESERVE bits panic,
 *                   ISR_NOIMPL bits are warned about and dropped)
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    // statistics: count each unmasked interrupt type written to the ISR
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1145
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing. just telling you in case you were thinking
   of expanding use.
*/
/**
 * Clear bits in the device ISR (kernel has read and serviced it) and,
 * if nothing unmasked remains, drop the CPU interrupt line.  Before
 * clearing, record the per-type "posted" coalescing statistics for
 * whatever unmasked interrupts were outstanding.
 */
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // statistics: these reflect the ISR state *before* clearing
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1196
1197 void
1198 NSGigE::devIntrChangeMask()
1199 {
1200 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1201 regs.isr, regs.imr, regs.isr & regs.imr);
1202
1203 if (regs.isr & regs.imr)
1204 cpuIntrPost(curTick);
1205 else
1206 cpuIntrClear();
1207 }
1208
/**
 * Schedule delivery of an interrupt to the CPU at tick @p when.
 * If an earlier (or equal) delivery is already scheduled, the new
 * request coalesces into it; otherwise any pending event is squashed
 * and replaced.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    if (intrTick < curTick) {
        // should be unreachable given the asserts above; trap in the
        // debugger and clamp rather than scheduling in the past
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any previously scheduled delivery with this one
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1243
1244 void
1245 NSGigE::cpuInterrupt()
1246 {
1247 assert(intrTick == curTick);
1248
1249 // Whether or not there's a pending interrupt, we don't care about
1250 // it anymore
1251 intrEvent = 0;
1252 intrTick = 0;
1253
1254 // Don't send an interrupt if there's already one
1255 if (cpuPendingIntr) {
1256 DPRINTF(EthernetIntr,
1257 "would send an interrupt now, but there's already pending\n");
1258 } else {
1259 // Send interrupt
1260 cpuPendingIntr = true;
1261
1262 DPRINTF(EthernetIntr, "posting interrupt\n");
1263 intrPost();
1264 }
1265 }
1266
1267 void
1268 NSGigE::cpuIntrClear()
1269 {
1270 if (!cpuPendingIntr)
1271 return;
1272
1273 if (intrEvent) {
1274 intrEvent->squash();
1275 intrEvent = 0;
1276 }
1277
1278 intrTick = 0;
1279
1280 cpuPendingIntr = false;
1281
1282 DPRINTF(EthernetIntr, "clearing interrupt\n");
1283 intrClear();
1284 }
1285
/** @return true while an interrupt is asserted to the CPU. */
bool
NSGigE::cpuIntrPending() const
{ return cpuPendingIntr; }
1289
1290 void
1291 NSGigE::txReset()
1292 {
1293
1294 DPRINTF(Ethernet, "transmit reset\n");
1295
1296 CTDD = false;
1297 txEnable = false;;
1298 txFragPtr = 0;
1299 assert(txDescCnt == 0);
1300 txFifo.clear();
1301 txState = txIdle;
1302 assert(txDmaState == dmaIdle);
1303 }
1304
1305 void
1306 NSGigE::rxReset()
1307 {
1308 DPRINTF(Ethernet, "receive reset\n");
1309
1310 CRDD = false;
1311 assert(rxPktBytes == 0);
1312 rxEnable = false;
1313 rxFragPtr = 0;
1314 assert(rxDescCnt == 0);
1315 assert(rxDmaState == dmaIdle);
1316 rxFifo.clear();
1317 rxState = rxIdle;
1318 }
1319
/**
 * Restore the register file and receive-filter flags to their
 * power-on defaults (used at construction and on CR_RST).
 */
void
NSGigE::regsReset()
{
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;              // report link up
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    // receive filter disabled until the driver programs RFCR
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1341
void
NSGigE::rxDmaReadCopy()
{
    // Perform the functional copy from physical memory for the pending
    // rx DMA read and return the rx DMA engine to idle.
    assert(rxDmaState == dmaReading);

    physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1354
1355 bool
1356 NSGigE::doRxDmaRead()
1357 {
1358 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1359 rxDmaState = dmaReading;
1360
1361 if (dmaInterface && !rxDmaFree) {
1362 if (dmaInterface->busy())
1363 rxDmaState = dmaReadWaiting;
1364 else
1365 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1366 &rxDmaReadEvent, true);
1367 return true;
1368 }
1369
1370 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1371 rxDmaReadCopy();
1372 return false;
1373 }
1374
1375 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1376 Tick start = curTick + dmaReadDelay + factor;
1377 rxDmaReadEvent.schedule(start);
1378 return true;
1379 }
1380
void
NSGigE::rxDmaReadDone()
{
    // Completion callback for a delayed rx DMA read: do the copy, then
    // restart the state machines.
    assert(rxDmaState == dmaReading);
    rxDmaReadCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1393
void
NSGigE::rxDmaWriteCopy()
{
    // Perform the functional copy to physical memory for the pending
    // rx DMA write and return the rx DMA engine to idle.
    assert(rxDmaState == dmaWriting);

    physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
    rxDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
            rxDmaAddr, rxDmaLen);
    DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
}
1406
1407 bool
1408 NSGigE::doRxDmaWrite()
1409 {
1410 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1411 rxDmaState = dmaWriting;
1412
1413 if (dmaInterface && !rxDmaFree) {
1414 if (dmaInterface->busy())
1415 rxDmaState = dmaWriteWaiting;
1416 else
1417 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1418 &rxDmaWriteEvent, true);
1419 return true;
1420 }
1421
1422 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1423 rxDmaWriteCopy();
1424 return false;
1425 }
1426
1427 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1428 Tick start = curTick + dmaWriteDelay + factor;
1429 rxDmaWriteEvent.schedule(start);
1430 return true;
1431 }
1432
void
NSGigE::rxDmaWriteDone()
{
    // Completion callback for a delayed rx DMA write: do the copy, then
    // restart the state machines.
    assert(rxDmaState == dmaWriting);
    rxDmaWriteCopy();

    // If the transmit state machine has a pending DMA, let it go first
    if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
        txKick();

    rxKick();
}
1445
/**
 * Advance the receive state machine as far as it can go right now.
 * States follow the DP83820 descriptor-based receive flow: fetch a
 * descriptor, drain the packet from the rx fifo into the descriptor's
 * buffer via DMA, write back cmdsts/extsts, then follow the link field
 * to the next descriptor.  Exits whenever it must wait on a DMA.
 */
void
NSGigE::rxKick()
{
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    // Respect any throttling deadline set elsewhere.
    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    // Retry any rx DMA that was stalled waiting for the bus.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor already done: only refresh its link
            // field to see if the driver appended more descriptors.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            // NOTE(review): pointer arithmetic here is on ns_desc*, so
            // offsetof() is scaled by sizeof(ns_desc).  This only lands
            // on 'link' if link's offset is 0 (it appears to be the
            // first member) — confirm, and consider casting to uint8_t*
            // before adding the offset.
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor pointed to by rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        // On receive, OWN set means the device has already filled this
        // descriptor and the driver hasn't reclaimed it: ring is full.
        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                                tcp->sport(), tcp->dport());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }


        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Still data to move: DMA the remaining bytes into the
            // descriptor's buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            // Build the writeback value: hand ownership to the driver,
            // mark OK, and stuff the packet size into the low 16 bits.
            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length;   //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Verify L3/L4 checksums and record the results in extsts
            // for the driver (receive checksum offload).
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back cmdsts and extsts together (they are adjacent
            // in the descriptor).
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        // Account for the bytes just DMA'd and go see if more remain.
        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        // Per-descriptor interrupt requested by the driver.
        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // Follow the descriptor chain; a null link means the end of
        // the ring, so remember we're done with the current one (CRDD).
        if (rxDescCache.link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}
1756
1757 void
1758 NSGigE::transmit()
1759 {
1760 if (txFifo.empty()) {
1761 DPRINTF(Ethernet, "nothing to transmit\n");
1762 return;
1763 }
1764
1765 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1766 txFifo.size());
1767 if (interface->sendPacket(txFifo.front())) {
1768 #if TRACING_ON
1769 if (DTRACE(Ethernet)) {
1770 IpPtr ip(txFifo.front());
1771 if (ip) {
1772 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1773 TcpPtr tcp(ip);
1774 if (tcp) {
1775 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
1776 tcp->sport(), tcp->dport());
1777 }
1778 }
1779 }
1780 #endif
1781
1782 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
1783 txBytes += txFifo.front()->length;
1784 txPackets++;
1785
1786 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1787 txFifo.avail());
1788 txFifo.pop();
1789
1790 /*
1791 * normally do a writeback of the descriptor here, and ONLY
1792 * after that is done, send this interrupt. but since our
1793 * stuff never actually fails, just do this interrupt here,
1794 * otherwise the code has to stray from this nice format.
1795 * besides, it's functionally the same.
1796 */
1797 devIntrPost(ISR_TXOK);
1798 }
1799
1800 if (!txFifo.empty() && !txEvent.scheduled()) {
1801 DPRINTF(Ethernet, "reschedule transmit\n");
1802 txEvent.schedule(curTick + 1000);
1803 }
1804 }
1805
void
NSGigE::txDmaReadCopy()
{
    // Perform the functional copy from physical memory for the pending
    // tx DMA read and return the tx DMA engine to idle.
    assert(txDmaState == dmaReading);

    physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1818
1819 bool
1820 NSGigE::doTxDmaRead()
1821 {
1822 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1823 txDmaState = dmaReading;
1824
1825 if (dmaInterface && !txDmaFree) {
1826 if (dmaInterface->busy())
1827 txDmaState = dmaReadWaiting;
1828 else
1829 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1830 &txDmaReadEvent, true);
1831 return true;
1832 }
1833
1834 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1835 txDmaReadCopy();
1836 return false;
1837 }
1838
1839 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1840 Tick start = curTick + dmaReadDelay + factor;
1841 txDmaReadEvent.schedule(start);
1842 return true;
1843 }
1844
void
NSGigE::txDmaReadDone()
{
    // Completion callback for a delayed tx DMA read: do the copy, then
    // restart the state machines.
    assert(txDmaState == dmaReading);
    txDmaReadCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
1857
void
NSGigE::txDmaWriteCopy()
{
    // Perform the functional copy to physical memory for the pending
    // tx DMA write and return the tx DMA engine to idle.
    assert(txDmaState == dmaWriting);

    physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
    txDmaState = dmaIdle;

    DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
            txDmaAddr, txDmaLen);
    DDUMP(EthernetDMA, txDmaData, txDmaLen);
}
1870
1871 bool
1872 NSGigE::doTxDmaWrite()
1873 {
1874 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1875 txDmaState = dmaWriting;
1876
1877 if (dmaInterface && !txDmaFree) {
1878 if (dmaInterface->busy())
1879 txDmaState = dmaWriteWaiting;
1880 else
1881 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1882 &txDmaWriteEvent, true);
1883 return true;
1884 }
1885
1886 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1887 txDmaWriteCopy();
1888 return false;
1889 }
1890
1891 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1892 Tick start = curTick + dmaWriteDelay + factor;
1893 txDmaWriteEvent.schedule(start);
1894 return true;
1895 }
1896
void
NSGigE::txDmaWriteDone()
{
    // Completion callback for a delayed tx DMA write: do the copy, then
    // restart the state machines.
    assert(txDmaState == dmaWriting);
    txDmaWriteCopy();

    // If the receive state machine has a pending DMA, let it go first
    if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
        rxKick();

    txKick();
}
1909
/**
 * Advance the transmit state machine as far as it can go right now.
 * Mirrors rxKick(): fetch a descriptor, DMA its fragments into a
 * staging packet, push the finished packet into the tx fifo, write the
 * descriptor status back, and follow the link to the next descriptor.
 * Exits whenever it must wait on a DMA.
 */
void
NSGigE::txKick()
{
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

    // Respect any throttling deadline set elsewhere.
    if (txKickTick > curTick) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);

        return;
    }

  next:
    // Retry any tx DMA that was stalled waiting for the bus.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor already done: only refresh its link
            // field to see if the driver appended more descriptors.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            // NOTE(review): pointer arithmetic here is on ns_desc*, so
            // offsetof() is scaled by sizeof(ns_desc).  This only lands
            // on 'link' if link's offset is 0 — confirm, and consider
            // casting to uint8_t* before adding the offset.
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the whole descriptor pointed to by txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        // On transmit, OWN set means the driver has handed this
        // descriptor to the device and it is ready to send.
        if (txDescCache.cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        // Lazily allocate the staging packet the fragments are
        // assembled into.
        if (!txPacket) {
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new PacketData(16384);
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                // Packet continues in the next descriptor; just write
                // back ownership of this one.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Transmit checksum offload: fill in the checksums the
                // driver asked the hardware to compute.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
#ifndef NDEBUG
                // 'success' exists only in debug builds; the assert
                // below compiles away together with it under NDEBUG.
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back cmdsts and extsts together (adjacent in
                // the descriptor).
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: try to drain it onto the wire, then wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        if (txDmaState != dmaIdle)
            goto exit;

        // Account for the fragment just DMA'd and reserve its space
        // in the fifo before continuing.
        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        if (txDmaState != dmaIdle)
            goto exit;

        // Per-descriptor interrupt requested by the driver.
        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        // Follow the descriptor chain; a null link means the end of
        // the ring, so go idle.
        if (txDescCache.link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}
2188
2189 void
2190 NSGigE::transferDone()
2191 {
2192 if (txFifo.empty()) {
2193 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2194 return;
2195 }
2196
2197 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2198
2199 if (txEvent.scheduled())
2200 txEvent.reschedule(curTick + 1);
2201 else
2202 txEvent.schedule(curTick + 1);
2203 }
2204
2205 bool
2206 NSGigE::rxFilter(const PacketPtr &packet)
2207 {
2208 EthPtr eth = packet;
2209 bool drop = true;
2210 string type;
2211
2212 const EthAddr &dst = eth->dst();
2213 if (dst.unicast()) {
2214 // If we're accepting all unicast addresses
2215 if (acceptUnicast)
2216 drop = false;
2217
2218 // If we make a perfect match
2219 if (acceptPerfect && dst == rom.perfectMatch)
2220 drop = false;
2221
2222 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2223 drop = false;
2224
2225 } else if (dst.broadcast()) {
2226 // if we're accepting broadcasts
2227 if (acceptBroadcast)
2228 drop = false;
2229
2230 } else if (dst.multicast()) {
2231 // if we're accepting all multicasts
2232 if (acceptMulticast)
2233 drop = false;
2234
2235 }
2236
2237 if (drop) {
2238 DPRINTF(Ethernet, "rxFilter drop\n");
2239 DDUMP(EthernetData, packet->data, packet->length);
2240 }
2241
2242 return drop;
2243 }
2244
/**
 * Entry point from the ether link for an arriving packet.
 *
 * @return true if the packet was consumed (including the drop cases),
 *         false only when the rx fifo had no room, so the link keeps it.
 */
bool
NSGigE::recvPacket(PacketPtr packet)
{
    // NOTE(review): rxBytes/rxPackets are bumped even for packets that
    // are subsequently filtered or dropped below — confirm intended.
    rxBytes += packet->length;
    rxPackets++;

    DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
            rxFifo.avail());

    if (!rxEnable) {
        DPRINTF(Ethernet, "receive disabled...packet dropped\n");
        // debug_break(): drops with receive disabled are treated as a
        // noteworthy event worth stopping a debugger on.
        debug_break();
        interface->recvDone();
        return true;
    }

    if (rxFilterEnable && rxFilter(packet)) {
        DPRINTF(Ethernet, "packet filtered...dropped\n");
        interface->recvDone();
        return true;
    }

    if (rxFifo.avail() < packet->length) {
        DPRINTF(Ethernet,
                "packet will not fit in receive buffer...packet dropped\n");
        droppedPackets++;
        devIntrPost(ISR_RXORN);
        // No recvDone() here: returning false tells the link the
        // packet was not accepted.
        return false;
    }

    rxFifo.push(packet);
    interface->recvDone();

    // New data available: let the receive state machine run.
    rxKick();
    return true;
}
2281
2282 //=====================================================================
2283 //
2284 //
/**
 * Checkpoint the full device state.  The order and names of the
 * serialized fields form the checkpoint format and must match
 * unserialize() exactly — do not reorder.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     */
    // Pending delayed DMAs are completed functionally so no in-flight
    // DMA state needs to be checkpointed.
    // NOTE(review): the events themselves are not descheduled here —
    // presumably serialization only happens when the simulation is
    // about to stop; confirm they cannot fire again afterwards.
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // The in-progress packets are optional; store an existence flag,
    // then the packet data and the buffer cursor as an offset.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enums are stored as plain ints so the checkpoint format is stable.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick so it can be re-offset on restore.
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    // A zero tick means "no interrupt event pending".
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2438
2439 void
2440 NSGigE::unserialize(Checkpoint *cp, const std::string &section)
2441 {
2442 // Unserialize the PciDev base class
2443 PciDev::unserialize(cp, section);
2444
2445 UNSERIALIZE_SCALAR(regs.command);
2446 UNSERIALIZE_SCALAR(regs.config);
2447 UNSERIALIZE_SCALAR(regs.mear);
2448 UNSERIALIZE_SCALAR(regs.ptscr);
2449 UNSERIALIZE_SCALAR(regs.isr);
2450 UNSERIALIZE_SCALAR(regs.imr);
2451 UNSERIALIZE_SCALAR(regs.ier);
2452 UNSERIALIZE_SCALAR(regs.ihr);
2453 UNSERIALIZE_SCALAR(regs.txdp);
2454 UNSERIALIZE_SCALAR(regs.txdp_hi);
2455 UNSERIALIZE_SCALAR(regs.txcfg);
2456 UNSERIALIZE_SCALAR(regs.gpior);
2457 UNSERIALIZE_SCALAR(regs.rxdp);
2458 UNSERIALIZE_SCALAR(regs.rxdp_hi);
2459 UNSERIALIZE_SCALAR(regs.rxcfg);
2460 UNSERIALIZE_SCALAR(regs.pqcr);
2461 UNSERIALIZE_SCALAR(regs.wcsr);
2462 UNSERIALIZE_SCALAR(regs.pcr);
2463 UNSERIALIZE_SCALAR(regs.rfcr);
2464 UNSERIALIZE_SCALAR(regs.rfdr);
2465 UNSERIALIZE_SCALAR(regs.srr);
2466 UNSERIALIZE_SCALAR(regs.mibc);
2467 UNSERIALIZE_SCALAR(regs.vrcr);
2468 UNSERIALIZE_SCALAR(regs.vtcr);
2469 UNSERIALIZE_SCALAR(regs.vdr);
2470 UNSERIALIZE_SCALAR(regs.ccsr);
2471 UNSERIALIZE_SCALAR(regs.tbicr);
2472 UNSERIALIZE_SCALAR(regs.tbisr);
2473 UNSERIALIZE_SCALAR(regs.tanar);
2474 UNSERIALIZE_SCALAR(regs.tanlpar);
2475 UNSERIALIZE_SCALAR(regs.taner);
2476 UNSERIALIZE_SCALAR(regs.tesr);
2477
2478 UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);
2479
2480 UNSERIALIZE_SCALAR(ioEnable);
2481
2482 /*
2483 * unserialize the data fifos
2484 */
2485 rxFifo.unserialize("rxFifo", cp, section);
2486 txFifo.unserialize("txFifo", cp, section);
2487
2488 /*
2489 * unserialize the various helper variables
2490 */
2491 bool txPacketExists;
2492 UNSERIALIZE_SCALAR(txPacketExists);
2493 if (txPacketExists) {
2494 txPacket = new PacketData(16384);
2495 txPacket->unserialize("txPacket", cp, section);
2496 uint32_t txPktBufPtr;
2497 UNSERIALIZE_SCALAR(txPktBufPtr);
2498 txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
2499 } else
2500 txPacket = 0;
2501
2502 bool rxPacketExists;
2503 UNSERIALIZE_SCALAR(rxPacketExists);
2504 rxPacket = 0;
2505 if (rxPacketExists) {
2506 rxPacket = new PacketData(16384);
2507 rxPacket->unserialize("rxPacket", cp, section);
2508 uint32_t rxPktBufPtr;
2509 UNSERIALIZE_SCALAR(rxPktBufPtr);
2510 rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
2511 } else
2512 rxPacket = 0;
2513
2514 UNSERIALIZE_SCALAR(txXferLen);
2515 UNSERIALIZE_SCALAR(rxXferLen);
2516
2517 /*
2518 * Unserialize DescCaches
2519 */
2520 UNSERIALIZE_SCALAR(txDescCache.link);
2521 UNSERIALIZE_SCALAR(txDescCache.bufptr);
2522 UNSERIALIZE_SCALAR(txDescCache.cmdsts);
2523 UNSERIALIZE_SCALAR(txDescCache.extsts);
2524 UNSERIALIZE_SCALAR(rxDescCache.link);
2525 UNSERIALIZE_SCALAR(rxDescCache.bufptr);
2526 UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
2527 UNSERIALIZE_SCALAR(rxDescCache.extsts);
2528
2529 /*
2530 * unserialize tx state machine
2531 */
2532 int txState;
2533 UNSERIALIZE_SCALAR(txState);
2534 this->txState = (TxState) txState;
2535 UNSERIALIZE_SCALAR(txEnable);
2536 UNSERIALIZE_SCALAR(CTDD);
2537 UNSERIALIZE_SCALAR(txFragPtr);
2538 UNSERIALIZE_SCALAR(txDescCnt);
2539 int txDmaState;
2540 UNSERIALIZE_SCALAR(txDmaState);
2541 this->txDmaState = (DmaState) txDmaState;
2542
2543 /*
2544 * unserialize rx state machine
2545 */
2546 int rxState;
2547 UNSERIALIZE_SCALAR(rxState);
2548 this->rxState = (RxState) rxState;
2549 UNSERIALIZE_SCALAR(rxEnable);
2550 UNSERIALIZE_SCALAR(CRDD);
2551 UNSERIALIZE_SCALAR(rxPktBytes);
2552 UNSERIALIZE_SCALAR(rxFragPtr);
2553 UNSERIALIZE_SCALAR(rxDescCnt);
2554 int rxDmaState;
2555 UNSERIALIZE_SCALAR(rxDmaState);
2556 this->rxDmaState = (DmaState) rxDmaState;
2557
2558 UNSERIALIZE_SCALAR(extstsEnable);
2559
2560 /*
2561 * If there's a pending transmit, reschedule it now
2562 */
2563 Tick transmitTick;
2564 UNSERIALIZE_SCALAR(transmitTick);
2565 if (transmitTick)
2566 txEvent.schedule(curTick + transmitTick);
2567
2568 /*
2569 * unserialize receive address filter settings
2570 */
2571 UNSERIALIZE_SCALAR(rxFilterEnable);
2572 UNSERIALIZE_SCALAR(acceptBroadcast);
2573 UNSERIALIZE_SCALAR(acceptMulticast);
2574 UNSERIALIZE_SCALAR(acceptUnicast);
2575 UNSERIALIZE_SCALAR(acceptPerfect);
2576 UNSERIALIZE_SCALAR(acceptArp);
2577
2578 /*
2579 * Keep track of pending interrupt status.
2580 */
2581 UNSERIALIZE_SCALAR(intrTick);
2582 UNSERIALIZE_SCALAR(cpuPendingIntr);
2583 Tick intrEventTick;
2584 UNSERIALIZE_SCALAR(intrEventTick);
2585 if (intrEventTick) {
2586 intrEvent = new IntrEvent(this, true);
2587 intrEvent->schedule(intrEventTick);
2588 }
2589
2590 /*
2591 * re-add addrRanges to bus bridges
2592 */
2593 if (pioInterface) {
2594 pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
2595 pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
2596 }
2597 }
2598
2599 Tick
2600 NSGigE::cacheAccess(MemReqPtr &req)
2601 {
2602 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2603 req->paddr, req->paddr - addr);
2604 return curTick + pioLatency;
2605 }
2606
/*
 * Parameter declarations for the NSGigEInt SimObject: the Ethernet
 * interface object that attaches an NSGigE device to a link peer.
 */
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    SimObjectParam<EtherInt *> peer;    // interface at the other end of the link (may be NULL)
    SimObjectParam<NSGigE *> device;    // the NSGigE device this interface belongs to

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2613
/*
 * Descriptions and defaults for the NSGigEInt parameters.  The peer
 * may be left NULL and wired up by the other end; the device is
 * mandatory.
 */
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2620
2621 CREATE_SIM_OBJECT(NSGigEInt)
2622 {
2623 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2624
2625 EtherInt *p = (EtherInt *)peer;
2626 if (p) {
2627 dev_int->setPeer(p);
2628 p->setPeer(dev_int);
2629 }
2630
2631 return dev_int;
2632 }
2633
2634 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2635
2636
/*
 * Configuration parameters for the NSGigE (DP83820) Ethernet device
 * model.  These are bundled into an NSGigE::Params in the factory
 * below.
 */
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    Param<Addr> addr;                           // device base physical address
    Param<Tick> tx_delay;                       // transmit-side delay
    Param<Tick> rx_delay;                       // receive-side delay
    Param<Tick> intr_delay;                     // delay before posting an interrupt
    SimObjectParam<MemoryController *> mmu;     // memory controller
    SimObjectParam<PhysicalMemory *> physmem;   // physical memory backing the system
    Param<bool> rx_filter;                      // enable receive address filtering
    Param<string> hardware_address;             // Ethernet MAC address string
    SimObjectParam<Bus*> io_bus;                // bus used for header/PIO traffic
    SimObjectParam<Bus*> payload_bus;           // bus used for payload traffic
    SimObjectParam<HierParams *> hier;          // memory-hierarchy global parameters
    Param<Tick> pio_latency;                    // programmed-I/O latency
    Param<bool> dma_desc_free;                  // descriptor DMA takes no simulated time
    Param<bool> dma_data_free;                  // data DMA takes no simulated time
    Param<Tick> dma_read_delay;                 // fixed delay added to DMA reads
    Param<Tick> dma_write_delay;                // fixed delay added to DMA writes
    Param<Tick> dma_read_factor;                // DMA read multiplier (units per device ctor -- TODO confirm)
    Param<Tick> dma_write_factor;               // DMA write multiplier (units per device ctor -- TODO confirm)
    SimObjectParam<PciConfigAll *> configspace; // PCI configuration space container
    SimObjectParam<PciConfigData *> configdata; // PCI configuration register data
    SimObjectParam<Platform *> platform;        // platform object (interrupt delivery)
    Param<uint32_t> pci_bus;                    // PCI bus number
    Param<uint32_t> pci_dev;                    // PCI device number
    Param<uint32_t> pci_func;                   // PCI function number
    Param<uint32_t> tx_fifo_size;               // max bytes held in the transmit FIFO
    Param<uint32_t> rx_fifo_size;               // max bytes held in the receive FIFO

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2667
/*
 * Descriptions and default values for the NSGigE parameters.  The
 * description strings are user-visible configuration text and must
 * not be altered casually.
 */
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    INIT_PARAM(addr, "Device Address"),
    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(io_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2699
2700
/*
 * Factory for the NSGigE device: copies the declared parameters,
 * field by field, into an NSGigE::Params structure and constructs the
 * device from it.  The Params object is handed to the device, which
 * owns it thereafter.
 */
CREATE_SIM_OBJECT(NSGigE)
{
    NSGigE::Params *params = new NSGigE::Params;

    // Generic PCI-device identity and plumbing.
    params->name = getInstanceName();
    params->mmu = mmu;
    params->configSpace = configspace;
    params->configData = configdata;
    params->plat = platform;
    params->busNum = pci_bus;
    params->deviceNum = pci_dev;
    params->functionNum = pci_func;

    // NSGigE-specific timing, bus, DMA, and filter settings.
    params->intr_delay = intr_delay;
    params->pmem = physmem;
    params->tx_delay = tx_delay;
    params->rx_delay = rx_delay;
    params->hier = hier;
    params->header_bus = io_bus;       // "io_bus" param maps to the header bus
    params->payload_bus = payload_bus;
    params->pio_latency = pio_latency;
    params->dma_desc_free = dma_desc_free;
    params->dma_data_free = dma_data_free;
    params->dma_read_delay = dma_read_delay;
    params->dma_write_delay = dma_write_delay;
    params->dma_read_factor = dma_read_factor;
    params->dma_write_factor = dma_write_factor;
    params->rx_filter = rx_filter;
    params->eaddr = hardware_address;
    params->tx_fifo_size = tx_fifo_size;
    params->rx_fifo_size = rx_fifo_size;
    return new NSGigE(params);
}

REGISTER_SIM_OBJECT("NSGigE", NSGigE)