Merge zizzer:/bk/m5 into zed.eecs.umich.edu:/z/hsul/work/m5
[gem5.git] / dev / ns_gige.cc
1 /*
2 * Copyright (c) 2004 The Regents of The University of Michigan
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
/** @file
 * Device module for modelling the National Semiconductor
 * DP83820 ethernet controller.  Does not support priority queueing.
 */
33 #include <cstdio>
34 #include <deque>
35 #include <string>
36
37 #include "base/inet.hh"
38 #include "cpu/exec_context.hh"
39 #include "cpu/intr_control.hh"
40 #include "dev/dma.hh"
41 #include "dev/etherlink.hh"
42 #include "dev/ns_gige.hh"
43 #include "dev/pciconfigall.hh"
44 #include "mem/bus/bus.hh"
45 #include "mem/bus/dma_interface.hh"
46 #include "mem/bus/pio_interface.hh"
47 #include "mem/bus/pio_interface_impl.hh"
48 #include "mem/functional_mem/memory_control.hh"
49 #include "mem/functional_mem/physical_memory.hh"
50 #include "sim/builder.hh"
51 #include "sim/debug.hh"
52 #include "sim/host.hh"
53 #include "sim/stats.hh"
54 #include "targetarch/vtophys.hh"
55
// Printable names for the receive state machine states, indexed by
// the rx state enum value; used for debug/trace output.
const char *NsRxStateStrings[] =
{
    "rxIdle",
    "rxDescRefr",
    "rxDescRead",
    "rxFifoBlock",
    "rxFragWrite",
    "rxDescWrite",
    "rxAdvance"
};
66
// Printable names for the transmit state machine states, indexed by
// the tx state enum value; used for debug/trace output.
const char *NsTxStateStrings[] =
{
    "txIdle",
    "txDescRefr",
    "txDescRead",
    "txFifoBlock",
    "txFragRead",
    "txDescWrite",
    "txAdvance"
};
77
// Printable names for the DMA engine states, indexed by the DMA
// state enum value; used for debug/trace output.
const char *NsDmaState[] =
{
    "dmaIdle",
    "dmaReading",
    "dmaWriting",
    "dmaReadWaiting",
    "dmaWriteWaiting"
};
86
87 using namespace std;
88 using namespace Net;
89
90 ///////////////////////////////////////////////////////////////////////
91 //
92 // NSGigE PCI Device
93 //
/**
 * Construct the NS DP83820 device model.  Initializes both the rx and
 * tx state machines to idle, wires up the PIO and DMA bus interfaces
 * according to which bus parameters were supplied, converts the
 * configured delays into ticks, resets the device registers, and loads
 * the configured MAC address into the perfect-match filter ROM.
 */
NSGigE::NSGigE(Params *p)
    : PciDev(p), ioEnable(false),
      txFifo(p->tx_fifo_size), rxFifo(p->rx_fifo_size),
      txPacket(0), rxPacket(0), txPacketBufPtr(NULL), rxPacketBufPtr(NULL),
      txXferLen(0), rxXferLen(0), txState(txIdle), txEnable(false),
      CTDD(false),
      txFragPtr(0), txDescCnt(0), txDmaState(dmaIdle), rxState(rxIdle),
      rxEnable(false), CRDD(false), rxPktBytes(0),
      rxFragPtr(0), rxDescCnt(0), rxDmaState(dmaIdle), extstsEnable(false),
      rxDmaReadEvent(this), rxDmaWriteEvent(this),
      txDmaReadEvent(this), txDmaWriteEvent(this),
      dmaDescFree(p->dma_desc_free), dmaDataFree(p->dma_data_free),
      txDelay(p->tx_delay), rxDelay(p->rx_delay),
      rxKickTick(0), txKickTick(0),
      txEvent(this), rxFilterEnable(p->rx_filter), acceptBroadcast(false),
      acceptMulticast(false), acceptUnicast(false),
      acceptPerfect(false), acceptArp(false),
      physmem(p->pmem), intrTick(0), cpuPendingIntr(false),
      intrEvent(0), interface(0)
{
    // PIO goes on the header bus when one is configured; DMA uses the
    // payload bus for data if present, otherwise the header bus for both.
    if (p->header_bus) {
        pioInterface = newPioInterface(name(), p->hier,
                                       p->header_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->header_bus->clockRatio;

        if (p->payload_bus)
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->payload_bus, 1);
        else
            dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                                 p->header_bus,
                                                 p->header_bus, 1);
    } else if (p->payload_bus) {
        // No header bus: put both PIO and DMA on the payload bus.
        pioInterface = newPioInterface(name(), p->hier,
                                       p->payload_bus, this,
                                       &NSGigE::cacheAccess);

        pioLatency = p->pio_latency * p->payload_bus->clockRatio;

        dmaInterface = new DMAInterface<Bus>(name() + ".dma",
                                             p->payload_bus,
                                             p->payload_bus, 1);
    }
    // NOTE(review): if neither bus is configured, pioInterface and
    // dmaInterface are presumably left null — confirm default init.


    // Interrupt delay is configured in microseconds; everything else
    // below is already in ticks.
    intrDelay = US2Ticks(p->intr_delay);
    dmaReadDelay = p->dma_read_delay;
    dmaWriteDelay = p->dma_write_delay;
    dmaReadFactor = p->dma_read_factor;
    dmaWriteFactor = p->dma_write_factor;

    // Put the device registers into their reset state and install the
    // configured MAC address as the perfect-match filter entry.
    regsReset();
    memcpy(&rom.perfectMatch, p->eaddr.bytes(), ETH_ADDR_LEN);
}
151
// Destructor: no cleanup needed here; dynamically allocated interfaces
// live for the duration of the simulation.
NSGigE::~NSGigE()
{}
154
/**
 * Register this device's statistics with the gem5 stats framework and
 * define the derived formula statistics (bandwidths, packet rates, and
 * interrupt-coalescing averages) in terms of the raw counters.
 */
void
NSGigE::regStats()
{
    // --- Raw byte / packet counters ---
    txBytes
        .name(name() + ".txBytes")
        .desc("Bytes Transmitted")
        .prereq(txBytes)
        ;

    rxBytes
        .name(name() + ".rxBytes")
        .desc("Bytes Received")
        .prereq(rxBytes)
        ;

    txPackets
        .name(name() + ".txPackets")
        .desc("Number of Packets Transmitted")
        .prereq(txBytes)
        ;

    rxPackets
        .name(name() + ".rxPackets")
        .desc("Number of Packets Received")
        .prereq(rxBytes)
        ;

    // --- Checksum-offload counters ---
    txIpChecksums
        .name(name() + ".txIpChecksums")
        .desc("Number of tx IP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxIpChecksums
        .name(name() + ".rxIpChecksums")
        .desc("Number of rx IP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txTcpChecksums
        .name(name() + ".txTcpChecksums")
        .desc("Number of tx TCP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxTcpChecksums
        .name(name() + ".rxTcpChecksums")
        .desc("Number of rx TCP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    txUdpChecksums
        .name(name() + ".txUdpChecksums")
        .desc("Number of tx UDP Checksums done by device")
        .precision(0)
        .prereq(txBytes)
        ;

    rxUdpChecksums
        .name(name() + ".rxUdpChecksums")
        .desc("Number of rx UDP Checksums done by device")
        .precision(0)
        .prereq(rxBytes)
        ;

    // --- Descriptor DMA traffic ---
    descDmaReads
        .name(name() + ".descDMAReads")
        .desc("Number of descriptors the device read w/ DMA")
        .precision(0)
        ;

    descDmaWrites
        .name(name() + ".descDMAWrites")
        .desc("Number of descriptors the device wrote w/ DMA")
        .precision(0)
        ;

    descDmaRdBytes
        .name(name() + ".descDmaReadBytes")
        .desc("number of descriptor bytes read w/ DMA")
        .precision(0)
        ;

    descDmaWrBytes
        .name(name() + ".descDmaWriteBytes")
        .desc("number of descriptor bytes write w/ DMA")
        .precision(0)
        ;

    // --- Derived rates (formulas defined at the bottom) ---
    txBandwidth
        .name(name() + ".txBandwidth")
        .desc("Transmit Bandwidth (bits/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxBandwidth
        .name(name() + ".rxBandwidth")
        .desc("Receive Bandwidth (bits/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    txPacketRate
        .name(name() + ".txPPS")
        .desc("Packet Tranmission Rate (packets/s)")
        .precision(0)
        .prereq(txBytes)
        ;

    rxPacketRate
        .name(name() + ".rxPPS")
        .desc("Packet Reception Rate (packets/s)")
        .precision(0)
        .prereq(rxBytes)
        ;

    // --- Interrupt accounting ---
    // For each interrupt cause: "total" counts writes into the ISR,
    // "posted" counts interrupts actually delivered to the CPU, and
    // "coalesced" is the average number folded into each post.
    postedSwi
        .name(name() + ".postedSwi")
        .desc("number of software interrupts posted to CPU")
        .precision(0)
        ;

    totalSwi
        .name(name() + ".totalSwi")
        .desc("number of total Swi written to ISR")
        .precision(0)
        ;

    coalescedSwi
        .name(name() + ".coalescedSwi")
        .desc("average number of Swi's coalesced into each post")
        .precision(0)
        ;

    postedRxIdle
        .name(name() + ".postedRxIdle")
        .desc("number of rxIdle interrupts posted to CPU")
        .precision(0)
        ;

    totalRxIdle
        .name(name() + ".totalRxIdle")
        .desc("number of total RxIdle written to ISR")
        .precision(0)
        ;

    coalescedRxIdle
        .name(name() + ".coalescedRxIdle")
        .desc("average number of RxIdle's coalesced into each post")
        .precision(0)
        ;

    postedRxOk
        .name(name() + ".postedRxOk")
        .desc("number of RxOk interrupts posted to CPU")
        .precision(0)
        ;

    totalRxOk
        .name(name() + ".totalRxOk")
        .desc("number of total RxOk written to ISR")
        .precision(0)
        ;

    coalescedRxOk
        .name(name() + ".coalescedRxOk")
        .desc("average number of RxOk's coalesced into each post")
        .precision(0)
        ;

    postedRxDesc
        .name(name() + ".postedRxDesc")
        .desc("number of RxDesc interrupts posted to CPU")
        .precision(0)
        ;

    totalRxDesc
        .name(name() + ".totalRxDesc")
        .desc("number of total RxDesc written to ISR")
        .precision(0)
        ;

    coalescedRxDesc
        .name(name() + ".coalescedRxDesc")
        .desc("average number of RxDesc's coalesced into each post")
        .precision(0)
        ;

    postedTxOk
        .name(name() + ".postedTxOk")
        .desc("number of TxOk interrupts posted to CPU")
        .precision(0)
        ;

    totalTxOk
        .name(name() + ".totalTxOk")
        .desc("number of total TxOk written to ISR")
        .precision(0)
        ;

    coalescedTxOk
        .name(name() + ".coalescedTxOk")
        .desc("average number of TxOk's coalesced into each post")
        .precision(0)
        ;

    postedTxIdle
        .name(name() + ".postedTxIdle")
        .desc("number of TxIdle interrupts posted to CPU")
        .precision(0)
        ;

    totalTxIdle
        .name(name() + ".totalTxIdle")
        .desc("number of total TxIdle written to ISR")
        .precision(0)
        ;

    coalescedTxIdle
        .name(name() + ".coalescedTxIdle")
        .desc("average number of TxIdle's coalesced into each post")
        .precision(0)
        ;

    postedTxDesc
        .name(name() + ".postedTxDesc")
        .desc("number of TxDesc interrupts posted to CPU")
        .precision(0)
        ;

    totalTxDesc
        .name(name() + ".totalTxDesc")
        .desc("number of total TxDesc written to ISR")
        .precision(0)
        ;

    coalescedTxDesc
        .name(name() + ".coalescedTxDesc")
        .desc("average number of TxDesc's coalesced into each post")
        .precision(0)
        ;

    postedRxOrn
        .name(name() + ".postedRxOrn")
        .desc("number of RxOrn posted to CPU")
        .precision(0)
        ;

    totalRxOrn
        .name(name() + ".totalRxOrn")
        .desc("number of total RxOrn written to ISR")
        .precision(0)
        ;

    coalescedRxOrn
        .name(name() + ".coalescedRxOrn")
        .desc("average number of RxOrn's coalesced into each post")
        .precision(0)
        ;

    coalescedTotal
        .name(name() + ".coalescedTotal")
        .desc("average number of interrupts coalesced into each post")
        .precision(0)
        ;

    postedInterrupts
        .name(name() + ".postedInterrupts")
        .desc("number of posts to CPU")
        .precision(0)
        ;

    droppedPackets
        .name(name() + ".droppedPackets")
        .desc("number of packets dropped")
        .precision(0)
        ;

    // Formula definitions: coalescing averages are totals per CPU post.
    coalescedSwi = totalSwi / postedInterrupts;
    coalescedRxIdle = totalRxIdle / postedInterrupts;
    coalescedRxOk = totalRxOk / postedInterrupts;
    coalescedRxDesc = totalRxDesc / postedInterrupts;
    coalescedTxOk = totalTxOk / postedInterrupts;
    coalescedTxIdle = totalTxIdle / postedInterrupts;
    coalescedTxDesc = totalTxDesc / postedInterrupts;
    coalescedRxOrn = totalRxOrn / postedInterrupts;

    coalescedTotal = (totalSwi + totalRxIdle + totalRxOk + totalRxDesc + totalTxOk
                      + totalTxIdle + totalTxDesc + totalRxOrn) / postedInterrupts;

    // Bandwidth in bits/s and packet rates in packets/s over sim time.
    txBandwidth = txBytes * Stats::constant(8) / simSeconds;
    rxBandwidth = rxBytes * Stats::constant(8) / simSeconds;
    txPacketRate = txPackets / simSeconds;
    rxPacketRate = rxPackets / simSeconds;
}
456
457 /**
458 * This is to read the PCI general configuration registers
459 */
460 void
461 NSGigE::ReadConfig(int offset, int size, uint8_t *data)
462 {
463 if (offset < PCI_DEVICE_SPECIFIC)
464 PciDev::ReadConfig(offset, size, data);
465 else
466 panic("Device specific PCI config space not implemented!\n");
467 }
468
/**
 * Write a PCI general configuration register.  The standard portion of
 * config space is delegated to PciDev; writes to the device-specific
 * region panic.  After the base-class write, BAR and command-register
 * writes are intercepted to keep the PIO interface and ioEnable flag
 * in sync with what the kernel programmed.
 */
void
NSGigE::WriteConfig(int offset, int size, uint32_t data)
{
    if (offset < PCI_DEVICE_SPECIFIC)
        PciDev::WriteConfig(offset, size, data);
    else
        panic("Device specific PCI config space not implemented!\n");

    // Need to catch writes to BARs to update the PIO interface
    switch (offset) {
        // seems to work fine without all these PCI settings, but i
        // put in the IO to double check, an assertion will fail if we
        // need to properly implement it
      case PCI_COMMAND:
        // NOTE(review): this reads a single byte of config.data; it
        // assumes PCI_CMD_IOSE lives in the low byte of the command
        // register — confirm against the PCI_CMD_* definitions.
        if (config.data[offset] & PCI_CMD_IOSE)
            ioEnable = true;
        else
            ioEnable = false;

#if 0
        if (config.data[offset] & PCI_CMD_BME) {
            bmEnabled = true;
        }
        else {
            bmEnabled = false;
        }

        if (config.data[offset] & PCI_CMD_MSE) {
            memEnable = true;
        }
        else {
            memEnable = false;
        }
#endif
        break;

      case PCI0_BASE_ADDR0:
        if (BARAddrs[0] != 0) {
            // Register the newly programmed BAR window with the PIO
            // interface, then strip the uncached-space bits.
            // NOTE(review): the range is added before masking with
            // EV5::PAddrUncachedMask — verify this ordering is intended.
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));

            BARAddrs[0] &= EV5::PAddrUncachedMask;
        }
        break;
      case PCI0_BASE_ADDR1:
        if (BARAddrs[1] != 0) {
            if (pioInterface)
                pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));

            BARAddrs[1] &= EV5::PAddrUncachedMask;
        }
        break;
    }
}
526
/**
 * This reads the device registers, which are detailed in the NS83820
 * spec sheet.
 *
 * Handles a PIO read to the device.  The low 12 bits of the physical
 * address select the register.  Reserved registers panic, the PCI
 * config alias region is forwarded to ReadConfig(), MIB counters
 * always read as zero, and all remaining registers must be accessed
 * with 32-bit reads.
 *
 * @param req  memory request carrying paddr/vaddr/size
 * @param data destination buffer for the register value
 * @return No_Fault on success (all failure modes panic)
 */
Fault
NSGigE::read(MemReqPtr &req, uint8_t *data)
{
    assert(ioEnable);

    //The mask is to give you only the offset into the device register file
    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "read da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);


    // there are some reserved registers, you can see ns_gige_reg.h and
    // the spec sheet for details
    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // alias of PCI configuration space within the register file
        ReadConfig(daddr & 0xff, req->size, data);
        return No_Fault;
    } else if (daddr >= MIB_START && daddr <= MIB_END) {
        // don't implement all the MIB's.  hopefully the kernel
        // doesn't actually DEPEND upon their values
        // MIB are just hardware stats keepers
        uint32_t &reg = *(uint32_t *) data;
        reg = 0;
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    switch (req->size) {
      case sizeof(uint32_t):
        {
            uint32_t &reg = *(uint32_t *)data;

            switch (daddr) {
              case CR:
                reg = regs.command;
                //these are supposed to be cleared on a read
                reg &= ~(CR_RXD | CR_TXD | CR_TXR | CR_RXR);
                break;

              case CFG:
                reg = regs.config;
                break;

              case MEAR:
                reg = regs.mear;
                break;

              case PTSCR:
                reg = regs.ptscr;
                break;

              case ISR:
                // reading the ISR acknowledges (clears) all interrupts
                reg = regs.isr;
                devIntrClear(ISR_ALL);
                break;

              case IMR:
                reg = regs.imr;
                break;

              case IER:
                reg = regs.ier;
                break;

              case IHR:
                reg = regs.ihr;
                break;

              case TXDP:
                reg = regs.txdp;
                break;

              case TXDP_HI:
                reg = regs.txdp_hi;
                break;

              case TXCFG:
                reg = regs.txcfg;
                break;

              case GPIOR:
                reg = regs.gpior;
                break;

              case RXDP:
                reg = regs.rxdp;
                break;

              case RXDP_HI:
                reg = regs.rxdp_hi;
                break;

              case RXCFG:
                reg = regs.rxcfg;
                break;

              case PQCR:
                reg = regs.pqcr;
                break;

              case WCSR:
                reg = regs.wcsr;
                break;

              case PCR:
                reg = regs.pcr;
                break;

              // see the spec sheet for how RFCR and RFDR work
              // basically, you write to RFCR to tell the machine
              // what you want to do next, then you act upon RFDR,
              // and the device will be prepared b/c of what you
              // wrote to RFCR
              case RFCR:
                reg = regs.rfcr;
                break;

              case RFDR:
                // RFCR_RFADDR selects which 16-bit slice of the
                // perfect-match MAC address is returned, low byte in
                // the low half of the word.
                switch (regs.rfcr & RFCR_RFADDR) {
                  case 0x000:
                    reg = rom.perfectMatch[1];
                    reg = reg << 8;
                    reg += rom.perfectMatch[0];
                    break;
                  case 0x002:
                    reg = rom.perfectMatch[3] << 8;
                    reg += rom.perfectMatch[2];
                    break;
                  case 0x004:
                    reg = rom.perfectMatch[5] << 8;
                    reg += rom.perfectMatch[4];
                    break;
                  default:
                    panic("reading RFDR for something other than PMATCH!\n");
                    // didn't implement other RFDR functionality b/c
                    // driver didn't use it
                }
                break;

              case SRR:
                reg = regs.srr;
                break;

              case MIBC:
                reg = regs.mibc;
                // self-clearing control bits read back as zero
                reg &= ~(MIBC_MIBS | MIBC_ACLR);
                break;

              case VRCR:
                reg = regs.vrcr;
                break;

              case VTCR:
                reg = regs.vtcr;
                break;

              case VDR:
                reg = regs.vdr;
                break;

              case CCSR:
                reg = regs.ccsr;
                break;

              case TBICR:
                reg = regs.tbicr;
                break;

              case TBISR:
                reg = regs.tbisr;
                break;

              case TANAR:
                reg = regs.tanar;
                break;

              case TANLPAR:
                reg = regs.tanlpar;
                break;

              case TANER:
                reg = regs.taner;
                break;

              case TESR:
                reg = regs.tesr;
                break;

              default:
                panic("reading unimplemented register: addr=%#x", daddr);
            }

            DPRINTF(EthernetPIO, "read from %#x: data=%d data=%#x\n",
                    daddr, reg, reg);
        }
        break;

      default:
        panic("accessing register with invalid size: addr=%#x, size=%d",
              daddr, req->size);
    }

    return No_Fault;
}
736
/**
 * Handle a PIO write to the device register file (see the NS83820
 * spec sheet for register semantics).  The low 12 bits of the physical
 * address select the register.  Reserved registers panic, the PCI
 * config alias region is forwarded to WriteConfig(), and all other
 * registers require 32-bit writes.  Writing the command register (CR)
 * may kick the rx/tx state machines or reset the device.
 *
 * @param req  memory request carrying paddr/vaddr/size
 * @param data source buffer holding the value to write
 * @return No_Fault on success (all failure modes panic)
 */
Fault
NSGigE::write(MemReqPtr &req, const uint8_t *data)
{
    assert(ioEnable);

    Addr daddr = req->paddr & 0xfff;
    DPRINTF(EthernetPIO, "write da=%#x pa=%#x va=%#x size=%d\n",
            daddr, req->paddr, req->vaddr, req->size);

    if (daddr > LAST && daddr <= RESERVED) {
        panic("Accessing reserved register");
    } else if (daddr > RESERVED && daddr <= 0x3FC) {
        // alias of PCI configuration space within the register file
        WriteConfig(daddr & 0xff, req->size, *(uint32_t *)data);
        return No_Fault;
    } else if (daddr > 0x3FC)
        panic("Something is messed up!\n");

    if (req->size == sizeof(uint32_t)) {
        uint32_t reg = *(uint32_t *)data;
        DPRINTF(EthernetPIO, "write data=%d data=%#x\n", reg, reg);

        switch (daddr) {
          case CR:
            regs.command = reg;
            // TXD (disable) takes precedence over TXE (enable);
            // enabling kicks the tx machine if it is idle.
            if (reg & CR_TXD) {
                txEnable = false;
            } else if (reg & CR_TXE) {
                txEnable = true;

                // the kernel is enabling the transmit machine
                if (txState == txIdle)
                    txKick();
            }

            // same pattern for the receive machine
            if (reg & CR_RXD) {
                rxEnable = false;
            } else if (reg & CR_RXE) {
                rxEnable = true;

                if (rxState == rxIdle)
                    rxKick();
            }

            if (reg & CR_TXR)
                txReset();

            if (reg & CR_RXR)
                rxReset();

            if (reg & CR_SWI)
                devIntrPost(ISR_SWI);

            // full device reset: both state machines plus registers
            if (reg & CR_RST) {
                txReset();
                rxReset();

                regsReset();
            }
            break;

          case CFG:
            if (reg & CFG_LNKSTS ||
                reg & CFG_SPDSTS ||
                reg & CFG_DUPSTS ||
                reg & CFG_RESERVED ||
                reg & CFG_T64ADDR ||
                reg & CFG_PCI64_DET)
                panic("writing to read-only or reserved CFG bits!\n");

            // NOTE(review): writable CFG bits are OR-ed in, never
            // cleared — confirm the driver never needs to clear one.
            regs.config |= reg & ~(CFG_LNKSTS | CFG_SPDSTS | CFG_DUPSTS |
                                   CFG_RESERVED | CFG_T64ADDR | CFG_PCI64_DET);

            // all these #if 0's are because i don't THINK the kernel needs to
            // have these implemented. if there is a problem relating to one of
            // these, you may need to add functionality in.
#if 0
            if (reg & CFG_TBI_EN) ;
            if (reg & CFG_MODE_1000) ;
#endif

            if (reg & CFG_AUTO_1000)
                panic("CFG_AUTO_1000 not implemented!\n");

#if 0
            if (reg & CFG_PINT_DUPSTS ||
                reg & CFG_PINT_LNKSTS ||
                reg & CFG_PINT_SPDSTS)
                ;

            if (reg & CFG_TMRTEST) ;
            if (reg & CFG_MRM_DIS) ;
            if (reg & CFG_MWI_DIS) ;

            if (reg & CFG_T64ADDR)
                panic("CFG_T64ADDR is read only register!\n");

            if (reg & CFG_PCI64_DET)
                panic("CFG_PCI64_DET is read only register!\n");

            if (reg & CFG_DATA64_EN) ;
            if (reg & CFG_M64ADDR) ;
            if (reg & CFG_PHY_RST) ;
            if (reg & CFG_PHY_DIS) ;
#endif

            // extended status words in descriptors (needed for the
            // checksum-offload path)
            if (reg & CFG_EXTSTS_EN)
                extstsEnable = true;
            else
                extstsEnable = false;

#if 0
            if (reg & CFG_REQALG) ;
            if (reg & CFG_SB) ;
            if (reg & CFG_POW) ;
            if (reg & CFG_EXD) ;
            if (reg & CFG_PESEL) ;
            if (reg & CFG_BROM_DIS) ;
            if (reg & CFG_EXT_125) ;
            if (reg & CFG_BEM) ;
#endif
            break;

          case MEAR:
            regs.mear = reg;
            // since phy is completely faked, MEAR_MD* don't matter
            // and since the driver never uses MEAR_EE*, they don't
            // matter
#if 0
            if (reg & MEAR_EEDI) ;
            if (reg & MEAR_EEDO) ; // this one is read only
            if (reg & MEAR_EECLK) ;
            if (reg & MEAR_EESEL) ;
            if (reg & MEAR_MDIO) ;
            if (reg & MEAR_MDDIR) ;
            if (reg & MEAR_MDC) ;
#endif
            break;

          case PTSCR:
            regs.ptscr = reg & ~(PTSCR_RBIST_RDONLY);
            // these control BISTs for various parts of chip - we
            // don't care or do just fake that the BIST is done
            if (reg & PTSCR_RBIST_EN)
                regs.ptscr |= PTSCR_RBIST_DONE;
            if (reg & PTSCR_EEBIST_EN)
                regs.ptscr &= ~PTSCR_EEBIST_EN;
            if (reg & PTSCR_EELOAD_EN)
                regs.ptscr &= ~PTSCR_EELOAD_EN;
            break;

          case ISR: /* writing to the ISR has no effect */
            panic("ISR is a read only register!\n");

          case IMR:
            // changing the mask may post or clear a CPU interrupt
            regs.imr = reg;
            devIntrChangeMask();
            break;

          case IER:
            regs.ier = reg;
            break;

          case IHR:
            regs.ihr = reg;
            /* not going to implement real interrupt holdoff */
            break;

          case TXDP:
            // descriptor pointers are dword-aligned; loading a new one
            // invalidates the cached current-descriptor-done flag
            regs.txdp = (reg & 0xFFFFFFFC);
            assert(txState == txIdle);
            CTDD = false;
            break;

          case TXDP_HI:
            regs.txdp_hi = reg;
            break;

          case TXCFG:
            regs.txcfg = reg;
#if 0
            if (reg & TXCFG_CSI) ;
            if (reg & TXCFG_HBI) ;
            if (reg & TXCFG_MLB) ;
            if (reg & TXCFG_ATP) ;
            if (reg & TXCFG_ECRETRY) {
                /*
                 * this could easily be implemented, but considering
                 * the network is just a fake pipe, wouldn't make
                 * sense to do this
                 */
            }

            if (reg & TXCFG_BRST_DIS) ;
#endif

#if 0
            /* we handle our own DMA, ignore the kernel's exhortations */
            if (reg & TXCFG_MXDMA) ;
#endif

            // also, we currently don't care about fill/drain
            // thresholds though this may change in the future with
            // more realistic networks or a driver which changes it
            // according to feedback

            break;

          case GPIOR:
            regs.gpior = reg;
            /* these just control general purpose i/o pins, don't matter */
            break;

          case RXDP:
            regs.rxdp = reg;
            CRDD = false;
            break;

          case RXDP_HI:
            regs.rxdp_hi = reg;
            break;

          case RXCFG:
            regs.rxcfg = reg;
#if 0
            if (reg & RXCFG_AEP) ;
            if (reg & RXCFG_ARP) ;
            if (reg & RXCFG_STRIPCRC) ;
            if (reg & RXCFG_RX_RD) ;
            if (reg & RXCFG_ALP) ;
            if (reg & RXCFG_AIRL) ;

            /* we handle our own DMA, ignore what kernel says about it */
            if (reg & RXCFG_MXDMA) ;

            //also, we currently don't care about fill/drain thresholds
            //though this may change in the future with more realistic
            //networks or a driver which changes it according to feedback
            if (reg & (RXCFG_DRTH | RXCFG_DRTH0)) ;
#endif
            break;

          case PQCR:
            /* there is no priority queueing used in the linux 2.6 driver */
            regs.pqcr = reg;
            break;

          case WCSR:
            /* not going to implement wake on LAN */
            regs.wcsr = reg;
            break;

          case PCR:
            /* not going to implement pause control */
            regs.pcr = reg;
            break;

          case RFCR:
            // receive-filter control: latch the register and decode
            // the individual accept/enable flags
            regs.rfcr = reg;

            rxFilterEnable = (reg & RFCR_RFEN) ? true : false;
            acceptBroadcast = (reg & RFCR_AAB) ? true : false;
            acceptMulticast = (reg & RFCR_AAM) ? true : false;
            acceptUnicast = (reg & RFCR_AAU) ? true : false;
            acceptPerfect = (reg & RFCR_APM) ? true : false;
            acceptArp = (reg & RFCR_AARP) ? true : false;

#if 0
            if (reg & RFCR_APAT)
                panic("RFCR_APAT not implemented!\n");
#endif

            if (reg & RFCR_MHEN || reg & RFCR_UHEN)
                panic("hash filtering not implemented!\n");

            if (reg & RFCR_ULM)
                panic("RFCR_ULM not implemented!\n");

            break;

          case RFDR:
            panic("the driver never writes to RFDR, something is wrong!\n");

          case BRAR:
            panic("the driver never uses BRAR, something is wrong!\n");

          case BRDR:
            panic("the driver never uses BRDR, something is wrong!\n");

          case SRR:
            panic("SRR is read only register!\n");

          case MIBC:
            panic("the driver never uses MIBC, something is wrong!\n");

          case VRCR:
            regs.vrcr = reg;
            break;

          case VTCR:
            regs.vtcr = reg;
            break;

          case VDR:
            panic("the driver never uses VDR, something is wrong!\n");
            break;

          case CCSR:
            /* not going to implement clockrun stuff */
            regs.ccsr = reg;
            break;

          case TBICR:
            regs.tbicr = reg;
            if (reg & TBICR_MR_LOOPBACK)
                panic("TBICR_MR_LOOPBACK never used, something wrong!\n");

            // fake the PHY: autonegotiation completes instantly with
            // the link partner echoing our advertised abilities
            if (reg & TBICR_MR_AN_ENABLE) {
                regs.tanlpar = regs.tanar;
                regs.tbisr |= (TBISR_MR_AN_COMPLETE | TBISR_MR_LINK_STATUS);
            }

#if 0
            if (reg & TBICR_MR_RESTART_AN) ;
#endif

            break;

          case TBISR:
            panic("TBISR is read only register!\n");

          case TANAR:
            regs.tanar = reg;
            if (reg & TANAR_PS2)
                panic("this isn't used in driver, something wrong!\n");

            if (reg & TANAR_PS1)
                panic("this isn't used in driver, something wrong!\n");
            break;

          case TANLPAR:
            panic("this should only be written to by the fake phy!\n");

          case TANER:
            panic("TANER is read only register!\n");

          case TESR:
            regs.tesr = reg;
            break;

          default:
            panic("invalid register access daddr=%#x", daddr);
        }
    } else {
        panic("Invalid Request Size");
    }

    return No_Fault;
}
1095
/**
 * Post interrupt cause bits into the device ISR.  Unimplemented causes
 * are warned about and dropped; causes that are currently unmasked are
 * tallied in the per-cause "total" statistics.  If any ISR bit is
 * unmasked after the update, a CPU interrupt is scheduled, delayed by
 * intrDelay unless a no-delay cause is pending.
 *
 * @param interrupts ISR bit mask of the causes to raise
 */
void
NSGigE::devIntrPost(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot set a reserved interrupt");

    if (interrupts & ISR_NOIMPL)
        warn("interrupt not implemented %#x\n", interrupts);

    // drop unimplemented causes, then accumulate the rest in the ISR
    interrupts &= ~ISR_NOIMPL;
    regs.isr |= interrupts;

    // count each unmasked cause being raised (for coalescing stats)
    if (interrupts & regs.imr) {
        if (interrupts & ISR_SWI) {
            totalSwi++;
        }
        if (interrupts & ISR_RXIDLE) {
            totalRxIdle++;
        }
        if (interrupts & ISR_RXOK) {
            totalRxOk++;
        }
        if (interrupts & ISR_RXDESC) {
            totalRxDesc++;
        }
        if (interrupts & ISR_TXOK) {
            totalTxOk++;
        }
        if (interrupts & ISR_TXIDLE) {
            totalTxIdle++;
        }
        if (interrupts & ISR_TXDESC) {
            totalTxDesc++;
        }
        if (interrupts & ISR_RXORN) {
            totalRxOrn++;
        }
    }

    DPRINTF(EthernetIntr,
            "interrupt written to ISR: intr=%#x isr=%#x imr=%#x\n",
            interrupts, regs.isr, regs.imr);

    // schedule delivery to the CPU if anything is now unmasked;
    // no-delay causes bypass the configured interrupt delay
    if ((regs.isr & regs.imr)) {
        Tick when = curTick;
        if (!(regs.isr & regs.imr & ISR_NODELAY))
            when += intrDelay;
        cpuIntrPost(when);
    }
}
1146
/* writing this interrupt counting stats inside this means that this function
   is now limited to being used to clear all interrupts upon the kernel
   reading isr and servicing.  just telling you in case you were thinking
   of expanding use.
*/
void
NSGigE::devIntrClear(uint32_t interrupts)
{
    if (interrupts & ISR_RESERVE)
        panic("Cannot clear a reserved interrupt");

    // Tally every unmasked cause that was pending at clear time: each
    // one counts as having been serviced by this CPU post.
    if (regs.isr & regs.imr & ISR_SWI) {
        postedSwi++;
    }
    if (regs.isr & regs.imr & ISR_RXIDLE) {
        postedRxIdle++;
    }
    if (regs.isr & regs.imr & ISR_RXOK) {
        postedRxOk++;
    }
    if (regs.isr & regs.imr & ISR_RXDESC) {
        postedRxDesc++;
    }
    if (regs.isr & regs.imr & ISR_TXOK) {
        postedTxOk++;
    }
    if (regs.isr & regs.imr & ISR_TXIDLE) {
        postedTxIdle++;
    }
    if (regs.isr & regs.imr & ISR_TXDESC) {
        postedTxDesc++;
    }
    if (regs.isr & regs.imr & ISR_RXORN) {
        postedRxOrn++;
    }

    // one CPU post serviced if any tracked cause was pending/unmasked
    if (regs.isr & regs.imr & (ISR_SWI | ISR_RXIDLE | ISR_RXOK | ISR_RXDESC |
                               ISR_TXOK | ISR_TXIDLE | ISR_TXDESC | ISR_RXORN) )
        postedInterrupts++;

    interrupts &= ~ISR_NOIMPL;
    regs.isr &= ~interrupts;

    DPRINTF(EthernetIntr,
            "interrupt cleared from ISR: intr=%x isr=%x imr=%x\n",
            interrupts, regs.isr, regs.imr);

    // deassert the CPU interrupt line if nothing unmasked remains
    if (!(regs.isr & regs.imr))
        cpuIntrClear();
}
1197
1198 void
1199 NSGigE::devIntrChangeMask()
1200 {
1201 DPRINTF(EthernetIntr, "interrupt mask changed: isr=%x imr=%x masked=%x\n",
1202 regs.isr, regs.imr, regs.isr & regs.imr);
1203
1204 if (regs.isr & regs.imr)
1205 cpuIntrPost(curTick);
1206 else
1207 cpuIntrClear();
1208 }
1209
/**
 * Schedule delivery of an interrupt to the CPU at tick @p when.  If an
 * earlier interrupt event is already scheduled, the new request is
 * absorbed into it; otherwise any existing event is squashed and a new
 * one scheduled at the requested tick.
 */
void
NSGigE::cpuIntrPost(Tick when)
{
    // If the interrupt you want to post is later than an interrupt
    // already scheduled, just let it post in the coming one and don't
    // schedule another.
    // HOWEVER, must be sure that the scheduled intrTick is in the
    // future (this was formerly the source of a bug)
    /**
     * @todo this warning should be removed and the intrTick code should
     * be fixed.
     */
    assert(when >= curTick);
    assert(intrTick >= curTick || intrTick == 0);
    if (when > intrTick && intrTick != 0) {
        DPRINTF(EthernetIntr, "don't need to schedule event...intrTick=%d\n",
                intrTick);
        return;
    }

    intrTick = when;
    // defensive clamp: never schedule in the past (see @todo above)
    if (intrTick < curTick) {
        debug_break();
        intrTick = curTick;
    }

    DPRINTF(EthernetIntr, "going to schedule an interrupt for intrTick=%d\n",
            intrTick);

    // replace any pending event with one at the (earlier) new tick
    if (intrEvent)
        intrEvent->squash();
    intrEvent = new IntrEvent(this, true);
    intrEvent->schedule(intrTick);
}
1244
1245 void
1246 NSGigE::cpuInterrupt()
1247 {
1248 assert(intrTick == curTick);
1249
1250 // Whether or not there's a pending interrupt, we don't care about
1251 // it anymore
1252 intrEvent = 0;
1253 intrTick = 0;
1254
1255 // Don't send an interrupt if there's already one
1256 if (cpuPendingIntr) {
1257 DPRINTF(EthernetIntr,
1258 "would send an interrupt now, but there's already pending\n");
1259 } else {
1260 // Send interrupt
1261 cpuPendingIntr = true;
1262
1263 DPRINTF(EthernetIntr, "posting interrupt\n");
1264 intrPost();
1265 }
1266 }
1267
1268 void
1269 NSGigE::cpuIntrClear()
1270 {
1271 if (!cpuPendingIntr)
1272 return;
1273
1274 if (intrEvent) {
1275 intrEvent->squash();
1276 intrEvent = 0;
1277 }
1278
1279 intrTick = 0;
1280
1281 cpuPendingIntr = false;
1282
1283 DPRINTF(EthernetIntr, "clearing interrupt\n");
1284 intrClear();
1285 }
1286
1287 bool
1288 NSGigE::cpuIntrPending() const
1289 { return cpuPendingIntr; }
1290
1291 void
1292 NSGigE::txReset()
1293 {
1294
1295 DPRINTF(Ethernet, "transmit reset\n");
1296
1297 CTDD = false;
1298 txEnable = false;;
1299 txFragPtr = 0;
1300 assert(txDescCnt == 0);
1301 txFifo.clear();
1302 txState = txIdle;
1303 assert(txDmaState == dmaIdle);
1304 }
1305
1306 void
1307 NSGigE::rxReset()
1308 {
1309 DPRINTF(Ethernet, "receive reset\n");
1310
1311 CRDD = false;
1312 assert(rxPktBytes == 0);
1313 rxEnable = false;
1314 rxFragPtr = 0;
1315 assert(rxDescCnt == 0);
1316 assert(rxDmaState == dmaIdle);
1317 rxFifo.clear();
1318 rxState = rxIdle;
1319 }
1320
/**
 * Restore the entire register file and the cached receive-filter
 * flags to their hardware reset values.
 */
void
NSGigE::regsReset()
{
    // zero everything, then set the registers with non-zero resets
    memset(&regs, 0, sizeof(regs));
    regs.config = CFG_LNKSTS;
    regs.mear = MEAR_MDDIR | MEAR_EEDO;
    regs.txcfg = 0x120; // set drain threshold to 1024 bytes and
                        // fill threshold to 32 bytes
    regs.rxcfg = 0x4; // set drain threshold to 16 bytes
    regs.srr = 0x0103; // set the silicon revision to rev B or 0x103
    regs.mibc = MIBC_FRZ;
    regs.vdr = 0x81; // set the vlan tag type to 802.1q
    regs.tesr = 0xc000; // TBI capable of both full and half duplex

    // receive-filter flags mirror RFCR, which resets to zero
    extstsEnable = false;
    acceptBroadcast = false;
    acceptMulticast = false;
    acceptUnicast = false;
    acceptPerfect = false;
    acceptArp = false;
}
1342
1343 void
1344 NSGigE::rxDmaReadCopy()
1345 {
1346 assert(rxDmaState == dmaReading);
1347
1348 physmem->dma_read((uint8_t *)rxDmaData, rxDmaAddr, rxDmaLen);
1349 rxDmaState = dmaIdle;
1350
1351 DPRINTF(EthernetDMA, "rx dma read paddr=%#x len=%d\n",
1352 rxDmaAddr, rxDmaLen);
1353 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1354 }
1355
1356 bool
1357 NSGigE::doRxDmaRead()
1358 {
1359 assert(rxDmaState == dmaIdle || rxDmaState == dmaReadWaiting);
1360 rxDmaState = dmaReading;
1361
1362 if (dmaInterface && !rxDmaFree) {
1363 if (dmaInterface->busy())
1364 rxDmaState = dmaReadWaiting;
1365 else
1366 dmaInterface->doDMA(Read, rxDmaAddr, rxDmaLen, curTick,
1367 &rxDmaReadEvent, true);
1368 return true;
1369 }
1370
1371 if (dmaReadDelay == 0 && dmaReadFactor == 0) {
1372 rxDmaReadCopy();
1373 return false;
1374 }
1375
1376 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1377 Tick start = curTick + dmaReadDelay + factor;
1378 rxDmaReadEvent.schedule(start);
1379 return true;
1380 }
1381
1382 void
1383 NSGigE::rxDmaReadDone()
1384 {
1385 assert(rxDmaState == dmaReading);
1386 rxDmaReadCopy();
1387
1388 // If the transmit state machine has a pending DMA, let it go first
1389 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1390 txKick();
1391
1392 rxKick();
1393 }
1394
1395 void
1396 NSGigE::rxDmaWriteCopy()
1397 {
1398 assert(rxDmaState == dmaWriting);
1399
1400 physmem->dma_write(rxDmaAddr, (uint8_t *)rxDmaData, rxDmaLen);
1401 rxDmaState = dmaIdle;
1402
1403 DPRINTF(EthernetDMA, "rx dma write paddr=%#x len=%d\n",
1404 rxDmaAddr, rxDmaLen);
1405 DDUMP(EthernetDMA, rxDmaData, rxDmaLen);
1406 }
1407
1408 bool
1409 NSGigE::doRxDmaWrite()
1410 {
1411 assert(rxDmaState == dmaIdle || rxDmaState == dmaWriteWaiting);
1412 rxDmaState = dmaWriting;
1413
1414 if (dmaInterface && !rxDmaFree) {
1415 if (dmaInterface->busy())
1416 rxDmaState = dmaWriteWaiting;
1417 else
1418 dmaInterface->doDMA(WriteInvalidate, rxDmaAddr, rxDmaLen, curTick,
1419 &rxDmaWriteEvent, true);
1420 return true;
1421 }
1422
1423 if (dmaWriteDelay == 0 && dmaWriteFactor == 0) {
1424 rxDmaWriteCopy();
1425 return false;
1426 }
1427
1428 Tick factor = ((rxDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1429 Tick start = curTick + dmaWriteDelay + factor;
1430 rxDmaWriteEvent.schedule(start);
1431 return true;
1432 }
1433
1434 void
1435 NSGigE::rxDmaWriteDone()
1436 {
1437 assert(rxDmaState == dmaWriting);
1438 rxDmaWriteCopy();
1439
1440 // If the transmit state machine has a pending DMA, let it go first
1441 if (txDmaState == dmaReadWaiting || txDmaState == dmaWriteWaiting)
1442 txKick();
1443
1444 rxKick();
1445 }
1446
/**
 * Receive state machine.  Runs the rx side forward as far as it can:
 * a state that can proceed immediately jumps back to the "next"
 * label; a state that must wait for a DMA jumps to "exit", and the
 * DMA completion event calls back into this function to resume.
 */
void
NSGigE::rxKick()
{
    DPRINTF(EthernetSM, "receive kick rxState=%s (rxBuf.size=%d)\n",
            NsRxStateStrings[rxState], rxFifo.size());

    // Don't run before the tick we were told to resume at.
    if (rxKickTick > curTick) {
        DPRINTF(EthernetSM, "receive kick exiting, can't run till %d\n",
                rxKickTick);
        return;
    }

  next:
    // First retry any rx DMA that previously stalled on a busy bus.
    switch(rxDmaState) {
      case dmaReadWaiting:
        if (doRxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doRxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    // see state machine from spec for details
    // the way this works is, if you finish work on one state and can
    // go directly to another, you do that through jumping to the
    // label "next". however, if you have intermediate work, like DMA
    // so that you can't go to the next state yet, you go to exit and
    // exit the loop. however, when the DMA is done it will trigger
    // an event and come back to this loop.
    switch (rxState) {
      case rxIdle:
        if (!rxEnable) {
            DPRINTF(EthernetSM, "Receive Disabled! Nothing to do.\n");
            goto exit;
        }

        if (CRDD) {
            // Current descriptor already done: re-fetch just its link
            // field to see whether software appended a new one.
            rxState = rxDescRefr;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            // NOTE(review): pointer arithmetic on an ns_desc* scales
            // by sizeof(ns_desc); this appears to land on the link
            // field only because link is at offset 0 — confirm, or
            // cast to uint8_t* before adding offsetof.
            rxDmaData = &rxDescCache + offsetof(ns_desc, link);
            rxDmaLen = sizeof(rxDescCache.link);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        } else {
            // Fetch the whole descriptor pointed to by rxdp.
            rxState = rxDescRead;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += rxDmaLen;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      case rxDescRefr:
        // Wait for the link-field refresh DMA to complete.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxState = rxAdvance;
        break;

      case rxDescRead:
        // Wait for the descriptor fetch DMA to complete.
        if (rxDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "rxDescCache: addr=%08x read descriptor\n",
                regs.rxdp & 0x3fffffff);
        DPRINTF(EthernetDesc,
                "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                rxDescCache.extsts);

        // OWN set on an rx descriptor means the device filled it and
        // software hasn't reclaimed it yet: nowhere to put data.
        if (rxDescCache.cmdsts & CMDSTS_OWN) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            goto exit;
        } else {
            rxState = rxFifoBlock;
            rxFragPtr = rxDescCache.bufptr;
            rxDescCnt = rxDescCache.cmdsts & CMDSTS_LEN_MASK;
        }
        break;

      case rxFifoBlock:
        if (!rxPacket) {
            /**
             * @todo in reality, we should be able to start processing
             * the packet as it arrives, and not have to wait for the
             * full packet ot be in the receive fifo.
             */
            if (rxFifo.empty())
                goto exit;

            DPRINTF(EthernetSM, "****processing receive of new packet****\n");

            // If we don't have a packet, grab a new one from the fifo.
            rxPacket = rxFifo.front();
            rxPktBytes = rxPacket->length;
            rxPacketBufPtr = rxPacket->data;

#if TRACING_ON
            if (DTRACE(Ethernet)) {
                IpPtr ip(rxPacket);
                if (ip) {
                    DPRINTF(Ethernet, "ID is %d\n", ip->id());
                    TcpPtr tcp(ip);
                    if (tcp) {
                        DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
                                tcp->sport(), tcp->dport());
                    }
                }
            }
#endif

            // sanity check - i think the driver behaves like this
            assert(rxDescCnt >= rxPktBytes);
            rxFifo.pop();
        }

        // dont' need the && rxDescCnt > 0 if driver sanity check
        // above holds
        if (rxPktBytes > 0) {
            // Bytes remain: DMA the next fragment into the buffer.
            rxState = rxFragWrite;
            // don't need min<>(rxPktBytes,rxDescCnt) if above sanity
            // check holds
            rxXferLen = rxPktBytes;

            rxDmaAddr = rxFragPtr & 0x3fffffff;
            rxDmaData = rxPacketBufPtr;
            rxDmaLen = rxXferLen;
            rxDmaFree = dmaDataFree;

            if (doRxDmaWrite())
                goto exit;

        } else {
            // Whole packet copied: finish the descriptor.
            rxState = rxDescWrite;

            //if (rxPktBytes == 0) {  /* packet is done */
            assert(rxPktBytes == 0);
            DPRINTF(EthernetSM, "done with receiving packet\n");

            rxDescCache.cmdsts |= CMDSTS_OWN;
            rxDescCache.cmdsts &= ~CMDSTS_MORE;
            rxDescCache.cmdsts |= CMDSTS_OK;
            rxDescCache.cmdsts &= 0xffff0000;
            rxDescCache.cmdsts += rxPacket->length; //i.e. set CMDSTS_SIZE

#if 0
            /*
             * all the driver uses these are for its own stats keeping
             * which we don't care about, aren't necessary for
             * functionality and doing this would just slow us down.
             * if they end up using this in a later version for
             * functional purposes, just undef
             */
            if (rxFilterEnable) {
                rxDescCache.cmdsts &= ~CMDSTS_DEST_MASK;
                const EthAddr &dst = rxFifoFront()->dst();
                if (dst->unicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_SELF;
                if (dst->multicast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MULTI;
                if (dst->broadcast())
                    rxDescCache.cmdsts |= CMDSTS_DEST_MASK;
            }
#endif

            // Model receive checksum offload: verify checksums and
            // record results in the extended status word.
            IpPtr ip(rxPacket);
            if (extstsEnable && ip) {
                rxDescCache.extsts |= EXTSTS_IPPKT;
                rxIpChecksums++;
                if (cksum(ip) != 0) {
                    DPRINTF(EthernetCksum, "Rx IP Checksum Error\n");
                    rxDescCache.extsts |= EXTSTS_IPERR;
                }
                TcpPtr tcp(ip);
                UdpPtr udp(ip);
                if (tcp) {
                    rxDescCache.extsts |= EXTSTS_TCPPKT;
                    rxTcpChecksums++;
                    if (cksum(tcp) != 0) {
                        DPRINTF(EthernetCksum, "Rx TCP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_TCPERR;

                    }
                } else if (udp) {
                    rxDescCache.extsts |= EXTSTS_UDPPKT;
                    rxUdpChecksums++;
                    if (cksum(udp) != 0) {
                        DPRINTF(EthernetCksum, "Rx UDP Checksum Error\n");
                        rxDescCache.extsts |= EXTSTS_UDPERR;
                    }
                }
            }
            rxPacket = 0;

            /*
             * the driver seems to always receive into desc buffers
             * of size 1514, so you never have a pkt that is split
             * into multiple descriptors on the receive side, so
             * i don't implement that case, hence the assert above.
             */

            DPRINTF(EthernetDesc,
                    "rxDescCache: addr=%08x writeback cmdsts extsts\n",
                    regs.rxdp & 0x3fffffff);
            DPRINTF(EthernetDesc,
                    "rxDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                    rxDescCache.link, rxDescCache.bufptr, rxDescCache.cmdsts,
                    rxDescCache.extsts);

            // Write back cmdsts and extsts together (a single DMA
            // covering both adjacent fields of the descriptor).
            rxDmaAddr = (regs.rxdp + offsetof(ns_desc, cmdsts)) & 0x3fffffff;
            rxDmaData = &(rxDescCache.cmdsts);
            rxDmaLen = sizeof(rxDescCache.cmdsts) + sizeof(rxDescCache.extsts);
            rxDmaFree = dmaDescFree;

            descDmaWrites++;
            descDmaWrBytes += rxDmaLen;

            if (doRxDmaWrite())
                goto exit;
        }
        break;

      case rxFragWrite:
        // Wait for the fragment DMA, then account for what was moved.
        if (rxDmaState != dmaIdle)
            goto exit;

        rxPacketBufPtr += rxXferLen;
        rxFragPtr += rxXferLen;
        rxPktBytes -= rxXferLen;

        rxState = rxFifoBlock;
        break;

      case rxDescWrite:
        // Wait for the descriptor writeback DMA, then raise the
        // appropriate interrupts.
        if (rxDmaState != dmaIdle)
            goto exit;

        assert(rxDescCache.cmdsts & CMDSTS_OWN);

        assert(rxPacket == 0);
        devIntrPost(ISR_RXOK);

        if (rxDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_RXDESC);

        if (!rxEnable) {
            DPRINTF(EthernetSM, "Halting the RX state machine\n");
            rxState = rxIdle;
            goto exit;
        } else
            rxState = rxAdvance;
        break;

      case rxAdvance:
        // Follow the link pointer to the next descriptor, or idle at
        // the end of the ring (link == 0).
        if (rxDescCache.link == 0) {
            devIntrPost(ISR_RXIDLE);
            rxState = rxIdle;
            CRDD = true;
            goto exit;
        } else {
            rxState = rxDescRead;
            regs.rxdp = rxDescCache.link;
            CRDD = false;

            rxDmaAddr = regs.rxdp & 0x3fffffff;
            rxDmaData = &rxDescCache;
            rxDmaLen = sizeof(ns_desc);
            rxDmaFree = dmaDescFree;

            if (doRxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("Invalid rxState!");
    }

    DPRINTF(EthernetSM, "entering next rxState=%s\n",
            NsRxStateStrings[rxState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "rx state machine exited rxState=%s\n",
            NsRxStateStrings[rxState]);
}
1757
1758 void
1759 NSGigE::transmit()
1760 {
1761 if (txFifo.empty()) {
1762 DPRINTF(Ethernet, "nothing to transmit\n");
1763 return;
1764 }
1765
1766 DPRINTF(Ethernet, "Attempt Pkt Transmit: txFifo length=%d\n",
1767 txFifo.size());
1768 if (interface->sendPacket(txFifo.front())) {
1769 #if TRACING_ON
1770 if (DTRACE(Ethernet)) {
1771 IpPtr ip(txFifo.front());
1772 if (ip) {
1773 DPRINTF(Ethernet, "ID is %d\n", ip->id());
1774 TcpPtr tcp(ip);
1775 if (tcp) {
1776 DPRINTF(Ethernet, "Src Port=%d, Dest Port=%d\n",
1777 tcp->sport(), tcp->dport());
1778 }
1779 }
1780 }
1781 #endif
1782
1783 DDUMP(Ethernet, txFifo.front()->data, txFifo.front()->length);
1784 txBytes += txFifo.front()->length;
1785 txPackets++;
1786
1787 DPRINTF(Ethernet, "Successful Xmit! now txFifoAvail is %d\n",
1788 txFifo.avail());
1789 txFifo.pop();
1790
1791 /*
1792 * normally do a writeback of the descriptor here, and ONLY
1793 * after that is done, send this interrupt. but since our
1794 * stuff never actually fails, just do this interrupt here,
1795 * otherwise the code has to stray from this nice format.
1796 * besides, it's functionally the same.
1797 */
1798 devIntrPost(ISR_TXOK);
1799 }
1800
1801 if (!txFifo.empty() && !txEvent.scheduled()) {
1802 DPRINTF(Ethernet, "reschedule transmit\n");
1803 txEvent.schedule(curTick + 1000);
1804 }
1805 }
1806
1807 void
1808 NSGigE::txDmaReadCopy()
1809 {
1810 assert(txDmaState == dmaReading);
1811
1812 physmem->dma_read((uint8_t *)txDmaData, txDmaAddr, txDmaLen);
1813 txDmaState = dmaIdle;
1814
1815 DPRINTF(EthernetDMA, "tx dma read paddr=%#x len=%d\n",
1816 txDmaAddr, txDmaLen);
1817 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1818 }
1819
1820 bool
1821 NSGigE::doTxDmaRead()
1822 {
1823 assert(txDmaState == dmaIdle || txDmaState == dmaReadWaiting);
1824 txDmaState = dmaReading;
1825
1826 if (dmaInterface && !txDmaFree) {
1827 if (dmaInterface->busy())
1828 txDmaState = dmaReadWaiting;
1829 else
1830 dmaInterface->doDMA(Read, txDmaAddr, txDmaLen, curTick,
1831 &txDmaReadEvent, true);
1832 return true;
1833 }
1834
1835 if (dmaReadDelay == 0 && dmaReadFactor == 0.0) {
1836 txDmaReadCopy();
1837 return false;
1838 }
1839
1840 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaReadFactor;
1841 Tick start = curTick + dmaReadDelay + factor;
1842 txDmaReadEvent.schedule(start);
1843 return true;
1844 }
1845
1846 void
1847 NSGigE::txDmaReadDone()
1848 {
1849 assert(txDmaState == dmaReading);
1850 txDmaReadCopy();
1851
1852 // If the receive state machine has a pending DMA, let it go first
1853 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1854 rxKick();
1855
1856 txKick();
1857 }
1858
1859 void
1860 NSGigE::txDmaWriteCopy()
1861 {
1862 assert(txDmaState == dmaWriting);
1863
1864 physmem->dma_write(txDmaAddr, (uint8_t *)txDmaData, txDmaLen);
1865 txDmaState = dmaIdle;
1866
1867 DPRINTF(EthernetDMA, "tx dma write paddr=%#x len=%d\n",
1868 txDmaAddr, txDmaLen);
1869 DDUMP(EthernetDMA, txDmaData, txDmaLen);
1870 }
1871
1872 bool
1873 NSGigE::doTxDmaWrite()
1874 {
1875 assert(txDmaState == dmaIdle || txDmaState == dmaWriteWaiting);
1876 txDmaState = dmaWriting;
1877
1878 if (dmaInterface && !txDmaFree) {
1879 if (dmaInterface->busy())
1880 txDmaState = dmaWriteWaiting;
1881 else
1882 dmaInterface->doDMA(WriteInvalidate, txDmaAddr, txDmaLen, curTick,
1883 &txDmaWriteEvent, true);
1884 return true;
1885 }
1886
1887 if (dmaWriteDelay == 0 && dmaWriteFactor == 0.0) {
1888 txDmaWriteCopy();
1889 return false;
1890 }
1891
1892 Tick factor = ((txDmaLen + ULL(63)) >> ULL(6)) * dmaWriteFactor;
1893 Tick start = curTick + dmaWriteDelay + factor;
1894 txDmaWriteEvent.schedule(start);
1895 return true;
1896 }
1897
1898 void
1899 NSGigE::txDmaWriteDone()
1900 {
1901 assert(txDmaState == dmaWriting);
1902 txDmaWriteCopy();
1903
1904 // If the receive state machine has a pending DMA, let it go first
1905 if (rxDmaState == dmaReadWaiting || rxDmaState == dmaWriteWaiting)
1906 rxKick();
1907
1908 txKick();
1909 }
1910
/**
 * Transmit state machine.  Mirrors rxKick(): a state that can proceed
 * immediately jumps back to the "next" label; a state that must wait
 * for a DMA jumps to "exit", and the DMA completion event calls back
 * into this function to resume.
 */
void
NSGigE::txKick()
{
    DPRINTF(EthernetSM, "transmit kick txState=%s\n",
            NsTxStateStrings[txState]);

    // Don't run before the tick we were told to resume at.
    if (txKickTick > curTick) {
        DPRINTF(EthernetSM, "transmit kick exiting, can't run till %d\n",
                txKickTick);

        return;
    }

  next:
    // First retry any tx DMA that previously stalled on a busy bus.
    switch(txDmaState) {
      case dmaReadWaiting:
        if (doTxDmaRead())
            goto exit;
        break;
      case dmaWriteWaiting:
        if (doTxDmaWrite())
            goto exit;
        break;
      default:
        break;
    }

    switch (txState) {
      case txIdle:
        if (!txEnable) {
            DPRINTF(EthernetSM, "Transmit disabled. Nothing to do.\n");
            goto exit;
        }

        if (CTDD) {
            // Current descriptor already done: re-fetch just its link
            // field to see whether software appended a new one.
            txState = txDescRefr;

            txDmaAddr = regs.txdp & 0x3fffffff;
            // NOTE(review): pointer arithmetic on an ns_desc* scales
            // by sizeof(ns_desc); this appears to land on the link
            // field only because link is at offset 0 — confirm, or
            // cast to uint8_t* before adding offsetof.
            txDmaData = &txDescCache + offsetof(ns_desc, link);
            txDmaLen = sizeof(txDescCache.link);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;

        } else {
            // Fetch the whole descriptor pointed to by txdp.
            txState = txDescRead;

            txDmaAddr = regs.txdp & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            descDmaReads++;
            descDmaRdBytes += txDmaLen;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      case txDescRefr:
        // Wait for the link-field refresh DMA to complete.
        if (txDmaState != dmaIdle)
            goto exit;

        txState = txAdvance;
        break;

      case txDescRead:
        // Wait for the descriptor fetch DMA to complete.
        if (txDmaState != dmaIdle)
            goto exit;

        DPRINTF(EthernetDesc,
                "txDescCache: link=%08x bufptr=%08x cmdsts=%08x extsts=%08x\n",
                txDescCache.link, txDescCache.bufptr, txDescCache.cmdsts,
                txDescCache.extsts);

        // OWN set on a tx descriptor means software has handed it to
        // the device and there is data to send.
        if (txDescCache.cmdsts & CMDSTS_OWN) {
            txState = txFifoBlock;
            txFragPtr = txDescCache.bufptr;
            txDescCnt = txDescCache.cmdsts & CMDSTS_LEN_MASK;
        } else {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        }
        break;

      case txFifoBlock:
        if (!txPacket) {
            // Start assembling a fresh packet.
            DPRINTF(EthernetSM, "****starting the tx of a new packet****\n");
            txPacket = new PacketData;
            txPacket->data = new uint8_t[16384];
            txPacketBufPtr = txPacket->data;
        }

        if (txDescCnt == 0) {
            DPRINTF(EthernetSM, "the txDescCnt == 0, done with descriptor\n");
            if (txDescCache.cmdsts & CMDSTS_MORE) {
                // Packet continues in another descriptor: write back
                // cmdsts (with OWN cleared) and move on.
                DPRINTF(EthernetSM, "there are more descriptors to come\n");
                txState = txDescWrite;

                txDescCache.cmdsts &= ~CMDSTS_OWN;

                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts);
                txDmaFree = dmaDescFree;

                if (doTxDmaWrite())
                    goto exit;

            } else { /* this packet is totally done */
                DPRINTF(EthernetSM, "This packet is done, let's wrap it up\n");
                /* deal with the the packet that just finished */
                // Model transmit checksum offload as requested by the
                // extended status bits.
                if ((regs.vtcr & VTCR_PPCHK) && extstsEnable) {
                    IpPtr ip(txPacket);
                    if (txDescCache.extsts & EXTSTS_UDPPKT) {
                        UdpPtr udp(ip);
                        udp->sum(0);
                        udp->sum(cksum(udp));
                        txUdpChecksums++;
                    } else if (txDescCache.extsts & EXTSTS_TCPPKT) {
                        TcpPtr tcp(ip);
                        tcp->sum(0);
                        tcp->sum(cksum(tcp));
                        txTcpChecksums++;
                    }
                    if (txDescCache.extsts & EXTSTS_IPPKT) {
                        ip->sum(0);
                        ip->sum(cksum(ip));
                        txIpChecksums++;
                    }
                }

                txPacket->length = txPacketBufPtr - txPacket->data;
                // this is just because the receive can't handle a
                // packet bigger want to make sure
                assert(txPacket->length <= 1514);
#ifndef NDEBUG
                bool success =
#endif
                    txFifo.push(txPacket);
                assert(success);

                /*
                 * this following section is not tqo spec, but
                 * functionally shouldn't be any different. normally,
                 * the chip will wait til the transmit has occurred
                 * before writing back the descriptor because it has
                 * to wait to see that it was successfully transmitted
                 * to decide whether to set CMDSTS_OK or not.
                 * however, in the simulator since it is always
                 * successfully transmitted, and writing it exactly to
                 * spec would complicate the code, we just do it here
                 */

                txDescCache.cmdsts &= ~CMDSTS_OWN;
                txDescCache.cmdsts |= CMDSTS_OK;

                DPRINTF(EthernetDesc,
                        "txDesc writeback: cmdsts=%08x extsts=%08x\n",
                        txDescCache.cmdsts, txDescCache.extsts);

                // Write back cmdsts and extsts together (adjacent
                // fields, single DMA).
                txDmaAddr = regs.txdp + offsetof(ns_desc, cmdsts);
                txDmaAddr &= 0x3fffffff;
                txDmaData = &(txDescCache.cmdsts);
                txDmaLen = sizeof(txDescCache.cmdsts) +
                    sizeof(txDescCache.extsts);
                txDmaFree = dmaDescFree;

                descDmaWrites++;
                descDmaWrBytes += txDmaLen;

                transmit();
                txPacket = 0;

                // NOTE(review): when txEnable is false we exit here
                // without issuing the descriptor writeback DMA that
                // was just set up — confirm that this is intended.
                if (!txEnable) {
                    DPRINTF(EthernetSM, "halting TX state machine\n");
                    txState = txIdle;
                    goto exit;
                } else
                    txState = txAdvance;

                if (doTxDmaWrite())
                    goto exit;
            }
        } else {
            DPRINTF(EthernetSM, "this descriptor isn't done yet\n");
            if (!txFifo.full()) {
                txState = txFragRead;

                /*
                 * The number of bytes transferred is either whatever
                 * is left in the descriptor (txDescCnt), or if there
                 * is not enough room in the fifo, just whatever room
                 * is left in the fifo
                 */
                txXferLen = min<uint32_t>(txDescCnt, txFifo.avail());

                txDmaAddr = txFragPtr & 0x3fffffff;
                txDmaData = txPacketBufPtr;
                txDmaLen = txXferLen;
                txDmaFree = dmaDataFree;

                if (doTxDmaRead())
                    goto exit;
            } else {
                // Fifo full: drain it by transmitting, then wait.
                txState = txFifoBlock;
                transmit();

                goto exit;
            }

        }
        break;

      case txFragRead:
        // Wait for the fragment DMA, then account for what was moved.
        if (txDmaState != dmaIdle)
            goto exit;

        txPacketBufPtr += txXferLen;
        txFragPtr += txXferLen;
        txDescCnt -= txXferLen;
        txFifo.reserve(txXferLen);

        txState = txFifoBlock;
        break;

      case txDescWrite:
        // Wait for the cmdsts writeback DMA to complete.
        if (txDmaState != dmaIdle)
            goto exit;

        if (txDescCache.cmdsts & CMDSTS_INTR)
            devIntrPost(ISR_TXDESC);

        txState = txAdvance;
        break;

      case txAdvance:
        // Follow the link pointer to the next descriptor, or idle at
        // the end of the ring (link == 0).
        if (txDescCache.link == 0) {
            devIntrPost(ISR_TXIDLE);
            txState = txIdle;
            goto exit;
        } else {
            txState = txDescRead;
            regs.txdp = txDescCache.link;
            CTDD = false;

            txDmaAddr = txDescCache.link & 0x3fffffff;
            txDmaData = &txDescCache;
            txDmaLen = sizeof(ns_desc);
            txDmaFree = dmaDescFree;

            if (doTxDmaRead())
                goto exit;
        }
        break;

      default:
        panic("invalid state");
    }

    DPRINTF(EthernetSM, "entering next txState=%s\n",
            NsTxStateStrings[txState]);

    goto next;

  exit:
    /**
     * @todo do we want to schedule a future kick?
     */
    DPRINTF(EthernetSM, "tx state machine exited txState=%s\n",
            NsTxStateStrings[txState]);
}
2190
2191 void
2192 NSGigE::transferDone()
2193 {
2194 if (txFifo.empty()) {
2195 DPRINTF(Ethernet, "transfer complete: txFifo empty...nothing to do\n");
2196 return;
2197 }
2198
2199 DPRINTF(Ethernet, "transfer complete: data in txFifo...schedule xmit\n");
2200
2201 if (txEvent.scheduled())
2202 txEvent.reschedule(curTick + 1);
2203 else
2204 txEvent.schedule(curTick + 1);
2205 }
2206
2207 bool
2208 NSGigE::rxFilter(const PacketPtr &packet)
2209 {
2210 EthPtr eth = packet;
2211 bool drop = true;
2212 string type;
2213
2214 const EthAddr &dst = eth->dst();
2215 if (dst.unicast()) {
2216 // If we're accepting all unicast addresses
2217 if (acceptUnicast)
2218 drop = false;
2219
2220 // If we make a perfect match
2221 if (acceptPerfect && dst == rom.perfectMatch)
2222 drop = false;
2223
2224 if (acceptArp && eth->type() == ETH_TYPE_ARP)
2225 drop = false;
2226
2227 } else if (dst.broadcast()) {
2228 // if we're accepting broadcasts
2229 if (acceptBroadcast)
2230 drop = false;
2231
2232 } else if (dst.multicast()) {
2233 // if we're accepting all multicasts
2234 if (acceptMulticast)
2235 drop = false;
2236
2237 }
2238
2239 if (drop) {
2240 DPRINTF(Ethernet, "rxFilter drop\n");
2241 DDUMP(EthernetData, packet->data, packet->length);
2242 }
2243
2244 return drop;
2245 }
2246
2247 bool
2248 NSGigE::recvPacket(PacketPtr packet)
2249 {
2250 rxBytes += packet->length;
2251 rxPackets++;
2252
2253 DPRINTF(Ethernet, "Receiving packet from wire, rxFifoAvail=%d\n",
2254 rxFifo.avail());
2255
2256 if (!rxEnable) {
2257 DPRINTF(Ethernet, "receive disabled...packet dropped\n");
2258 debug_break();
2259 interface->recvDone();
2260 return true;
2261 }
2262
2263 if (rxFilterEnable && rxFilter(packet)) {
2264 DPRINTF(Ethernet, "packet filtered...dropped\n");
2265 interface->recvDone();
2266 return true;
2267 }
2268
2269 if (rxFifo.avail() < packet->length) {
2270 DPRINTF(Ethernet,
2271 "packet will not fit in receive buffer...packet dropped\n");
2272 droppedPackets++;
2273 devIntrPost(ISR_RXORN);
2274 return false;
2275 }
2276
2277 rxFifo.push(packet);
2278 interface->recvDone();
2279
2280 rxKick();
2281 return true;
2282 }
2283
2284 //=====================================================================
2285 //
2286 //
/**
 * Write the device's state to a checkpoint stream.  The field order
 * here must stay in lock-step with unserialize() below.
 */
void
NSGigE::serialize(ostream &os)
{
    // Serialize the PciDev base class
    PciDev::serialize(os);

    /*
     * Finalize any DMA events now.
     * Completing pending copies immediately means no DMA completion
     * event has to be represented in the checkpoint.
     */
    if (rxDmaReadEvent.scheduled())
        rxDmaReadCopy();
    if (rxDmaWriteEvent.scheduled())
        rxDmaWriteCopy();
    if (txDmaReadEvent.scheduled())
        txDmaReadCopy();
    if (txDmaWriteEvent.scheduled())
        txDmaWriteCopy();

    /*
     * Serialize the device registers
     */
    SERIALIZE_SCALAR(regs.command);
    SERIALIZE_SCALAR(regs.config);
    SERIALIZE_SCALAR(regs.mear);
    SERIALIZE_SCALAR(regs.ptscr);
    SERIALIZE_SCALAR(regs.isr);
    SERIALIZE_SCALAR(regs.imr);
    SERIALIZE_SCALAR(regs.ier);
    SERIALIZE_SCALAR(regs.ihr);
    SERIALIZE_SCALAR(regs.txdp);
    SERIALIZE_SCALAR(regs.txdp_hi);
    SERIALIZE_SCALAR(regs.txcfg);
    SERIALIZE_SCALAR(regs.gpior);
    SERIALIZE_SCALAR(regs.rxdp);
    SERIALIZE_SCALAR(regs.rxdp_hi);
    SERIALIZE_SCALAR(regs.rxcfg);
    SERIALIZE_SCALAR(regs.pqcr);
    SERIALIZE_SCALAR(regs.wcsr);
    SERIALIZE_SCALAR(regs.pcr);
    SERIALIZE_SCALAR(regs.rfcr);
    SERIALIZE_SCALAR(regs.rfdr);
    SERIALIZE_SCALAR(regs.srr);
    SERIALIZE_SCALAR(regs.mibc);
    SERIALIZE_SCALAR(regs.vrcr);
    SERIALIZE_SCALAR(regs.vtcr);
    SERIALIZE_SCALAR(regs.vdr);
    SERIALIZE_SCALAR(regs.ccsr);
    SERIALIZE_SCALAR(regs.tbicr);
    SERIALIZE_SCALAR(regs.tbisr);
    SERIALIZE_SCALAR(regs.tanar);
    SERIALIZE_SCALAR(regs.tanlpar);
    SERIALIZE_SCALAR(regs.taner);
    SERIALIZE_SCALAR(regs.tesr);

    SERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    SERIALIZE_SCALAR(ioEnable);

    /*
     * Serialize the data Fifos
     */
    rxFifo.serialize("rxFifo", os);
    txFifo.serialize("txFifo", os);

    /*
     * Serialize the various helper variables
     */
    // In-flight packets are stored along with the offset of the
    // buffer pointer into their data, so the pointer can be rebuilt.
    bool txPacketExists = txPacket;
    SERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket->serialize("txPacket", os);
        uint32_t txPktBufPtr = (uint32_t) (txPacketBufPtr - txPacket->data);
        SERIALIZE_SCALAR(txPktBufPtr);
    }

    bool rxPacketExists = rxPacket;
    SERIALIZE_SCALAR(rxPacketExists);
    if (rxPacketExists) {
        rxPacket->serialize("rxPacket", os);
        uint32_t rxPktBufPtr = (uint32_t) (rxPacketBufPtr - rxPacket->data);
        SERIALIZE_SCALAR(rxPktBufPtr);
    }

    SERIALIZE_SCALAR(txXferLen);
    SERIALIZE_SCALAR(rxXferLen);

    /*
     * Serialize DescCaches
     */
    SERIALIZE_SCALAR(txDescCache.link);
    SERIALIZE_SCALAR(txDescCache.bufptr);
    SERIALIZE_SCALAR(txDescCache.cmdsts);
    SERIALIZE_SCALAR(txDescCache.extsts);
    SERIALIZE_SCALAR(rxDescCache.link);
    SERIALIZE_SCALAR(rxDescCache.bufptr);
    SERIALIZE_SCALAR(rxDescCache.cmdsts);
    SERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * Serialize tx state machine
     */
    // Enums are stored as plain ints.
    int txState = this->txState;
    SERIALIZE_SCALAR(txState);
    SERIALIZE_SCALAR(txEnable);
    SERIALIZE_SCALAR(CTDD);
    SERIALIZE_SCALAR(txFragPtr);
    SERIALIZE_SCALAR(txDescCnt);
    int txDmaState = this->txDmaState;
    SERIALIZE_SCALAR(txDmaState);

    /*
     * Serialize rx state machine
     */
    int rxState = this->rxState;
    SERIALIZE_SCALAR(rxState);
    SERIALIZE_SCALAR(rxEnable);
    SERIALIZE_SCALAR(CRDD);
    SERIALIZE_SCALAR(rxPktBytes);
    SERIALIZE_SCALAR(rxFragPtr);
    SERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState = this->rxDmaState;
    SERIALIZE_SCALAR(rxDmaState);

    SERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, store the time so we can
     * reschedule it later
     */
    // Stored relative to curTick; 0 means "no pending transmit".
    Tick transmitTick = txEvent.scheduled() ? txEvent.when() - curTick : 0;
    SERIALIZE_SCALAR(transmitTick);

    /*
     * receive address filter settings
     */
    SERIALIZE_SCALAR(rxFilterEnable);
    SERIALIZE_SCALAR(acceptBroadcast);
    SERIALIZE_SCALAR(acceptMulticast);
    SERIALIZE_SCALAR(acceptUnicast);
    SERIALIZE_SCALAR(acceptPerfect);
    SERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    // intrEventTick == 0 means no interrupt event was scheduled.
    SERIALIZE_SCALAR(intrTick);
    SERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick = 0;
    if (intrEvent)
        intrEventTick = intrEvent->when();
    SERIALIZE_SCALAR(intrEventTick);

}
2440
/**
 * Restore the device's state from a checkpoint.  The field order here
 * must stay in lock-step with serialize() above.
 */
void
NSGigE::unserialize(Checkpoint *cp, const std::string &section)
{
    // Unserialize the PciDev base class
    PciDev::unserialize(cp, section);

    UNSERIALIZE_SCALAR(regs.command);
    UNSERIALIZE_SCALAR(regs.config);
    UNSERIALIZE_SCALAR(regs.mear);
    UNSERIALIZE_SCALAR(regs.ptscr);
    UNSERIALIZE_SCALAR(regs.isr);
    UNSERIALIZE_SCALAR(regs.imr);
    UNSERIALIZE_SCALAR(regs.ier);
    UNSERIALIZE_SCALAR(regs.ihr);
    UNSERIALIZE_SCALAR(regs.txdp);
    UNSERIALIZE_SCALAR(regs.txdp_hi);
    UNSERIALIZE_SCALAR(regs.txcfg);
    UNSERIALIZE_SCALAR(regs.gpior);
    UNSERIALIZE_SCALAR(regs.rxdp);
    UNSERIALIZE_SCALAR(regs.rxdp_hi);
    UNSERIALIZE_SCALAR(regs.rxcfg);
    UNSERIALIZE_SCALAR(regs.pqcr);
    UNSERIALIZE_SCALAR(regs.wcsr);
    UNSERIALIZE_SCALAR(regs.pcr);
    UNSERIALIZE_SCALAR(regs.rfcr);
    UNSERIALIZE_SCALAR(regs.rfdr);
    UNSERIALIZE_SCALAR(regs.srr);
    UNSERIALIZE_SCALAR(regs.mibc);
    UNSERIALIZE_SCALAR(regs.vrcr);
    UNSERIALIZE_SCALAR(regs.vtcr);
    UNSERIALIZE_SCALAR(regs.vdr);
    UNSERIALIZE_SCALAR(regs.ccsr);
    UNSERIALIZE_SCALAR(regs.tbicr);
    UNSERIALIZE_SCALAR(regs.tbisr);
    UNSERIALIZE_SCALAR(regs.tanar);
    UNSERIALIZE_SCALAR(regs.tanlpar);
    UNSERIALIZE_SCALAR(regs.taner);
    UNSERIALIZE_SCALAR(regs.tesr);

    UNSERIALIZE_ARRAY(rom.perfectMatch, ETH_ADDR_LEN);

    UNSERIALIZE_SCALAR(ioEnable);

    /*
     * unserialize the data fifos
     */
    rxFifo.unserialize("rxFifo", cp, section);
    txFifo.unserialize("txFifo", cp, section);

    /*
     * unserialize the various helper variables
     */
    // Rebuild the in-flight packet (if any) and re-derive the buffer
    // pointer from the checkpointed offset into its data.
    bool txPacketExists;
    UNSERIALIZE_SCALAR(txPacketExists);
    if (txPacketExists) {
        txPacket = new PacketData;
        txPacket->unserialize("txPacket", cp, section);
        uint32_t txPktBufPtr;
        UNSERIALIZE_SCALAR(txPktBufPtr);
        txPacketBufPtr = (uint8_t *) txPacket->data + txPktBufPtr;
    } else
        txPacket = 0;

    bool rxPacketExists;
    UNSERIALIZE_SCALAR(rxPacketExists);
    // NOTE(review): this assignment is redundant — both branches
    // below set rxPacket as well.
    rxPacket = 0;
    if (rxPacketExists) {
        rxPacket = new PacketData;
        rxPacket->unserialize("rxPacket", cp, section);
        uint32_t rxPktBufPtr;
        UNSERIALIZE_SCALAR(rxPktBufPtr);
        rxPacketBufPtr = (uint8_t *) rxPacket->data + rxPktBufPtr;
    } else
        rxPacket = 0;

    UNSERIALIZE_SCALAR(txXferLen);
    UNSERIALIZE_SCALAR(rxXferLen);

    /*
     * Unserialize DescCaches
     */
    UNSERIALIZE_SCALAR(txDescCache.link);
    UNSERIALIZE_SCALAR(txDescCache.bufptr);
    UNSERIALIZE_SCALAR(txDescCache.cmdsts);
    UNSERIALIZE_SCALAR(txDescCache.extsts);
    UNSERIALIZE_SCALAR(rxDescCache.link);
    UNSERIALIZE_SCALAR(rxDescCache.bufptr);
    UNSERIALIZE_SCALAR(rxDescCache.cmdsts);
    UNSERIALIZE_SCALAR(rxDescCache.extsts);

    /*
     * unserialize tx state machine
     */
    // State enums were checkpointed as ints; cast them back.
    int txState;
    UNSERIALIZE_SCALAR(txState);
    this->txState = (TxState) txState;
    UNSERIALIZE_SCALAR(txEnable);
    UNSERIALIZE_SCALAR(CTDD);
    UNSERIALIZE_SCALAR(txFragPtr);
    UNSERIALIZE_SCALAR(txDescCnt);
    int txDmaState;
    UNSERIALIZE_SCALAR(txDmaState);
    this->txDmaState = (DmaState) txDmaState;

    /*
     * unserialize rx state machine
     */
    int rxState;
    UNSERIALIZE_SCALAR(rxState);
    this->rxState = (RxState) rxState;
    UNSERIALIZE_SCALAR(rxEnable);
    UNSERIALIZE_SCALAR(CRDD);
    UNSERIALIZE_SCALAR(rxPktBytes);
    UNSERIALIZE_SCALAR(rxFragPtr);
    UNSERIALIZE_SCALAR(rxDescCnt);
    int rxDmaState;
    UNSERIALIZE_SCALAR(rxDmaState);
    this->rxDmaState = (DmaState) rxDmaState;

    UNSERIALIZE_SCALAR(extstsEnable);

    /*
     * If there's a pending transmit, reschedule it now
     */
    // transmitTick was stored relative to the checkpoint's curTick.
    Tick transmitTick;
    UNSERIALIZE_SCALAR(transmitTick);
    if (transmitTick)
        txEvent.schedule(curTick + transmitTick);

    /*
     * unserialize receive address filter settings
     */
    UNSERIALIZE_SCALAR(rxFilterEnable);
    UNSERIALIZE_SCALAR(acceptBroadcast);
    UNSERIALIZE_SCALAR(acceptMulticast);
    UNSERIALIZE_SCALAR(acceptUnicast);
    UNSERIALIZE_SCALAR(acceptPerfect);
    UNSERIALIZE_SCALAR(acceptArp);

    /*
     * Keep track of pending interrupt status.
     */
    // A non-zero intrEventTick means an interrupt event was pending
    // at checkpoint time; recreate and reschedule it.
    UNSERIALIZE_SCALAR(intrTick);
    UNSERIALIZE_SCALAR(cpuPendingIntr);
    Tick intrEventTick;
    UNSERIALIZE_SCALAR(intrEventTick);
    if (intrEventTick) {
        intrEvent = new IntrEvent(this, true);
        intrEvent->schedule(intrEventTick);
    }

    /*
     * re-add addrRanges to bus bridges
     */
    if (pioInterface) {
        pioInterface->addAddrRange(RangeSize(BARAddrs[0], BARSize[0]));
        pioInterface->addAddrRange(RangeSize(BARAddrs[1], BARSize[1]));
    }
}
2600
2601 Tick
2602 NSGigE::cacheAccess(MemReqPtr &req)
2603 {
2604 DPRINTF(EthernetPIO, "timing access to paddr=%#x (daddr=%#x)\n",
2605 req->paddr, req->paddr - addr);
2606 return curTick + pioLatency;
2607 }
2608
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)

    // The other end of the ethernet link (defaulted to NULL in the
    // INIT block, so an unconnected interface is legal)
    SimObjectParam<EtherInt *> peer;
    // The NSGigE device this interface belongs to
    SimObjectParam<NSGigE *> device;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigEInt)
2615
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigEInt)

    // peer may be left unset (NULL); device is mandatory
    INIT_PARAM_DFLT(peer, "peer interface", NULL),
    INIT_PARAM(device, "Ethernet device of this interface")

END_INIT_SIM_OBJECT_PARAMS(NSGigEInt)
2622
2623 CREATE_SIM_OBJECT(NSGigEInt)
2624 {
2625 NSGigEInt *dev_int = new NSGigEInt(getInstanceName(), device);
2626
2627 EtherInt *p = (EtherInt *)peer;
2628 if (p) {
2629 dev_int->setPeer(p);
2630 p->setPeer(dev_int);
2631 }
2632
2633 return dev_int;
2634 }
2635
2636 REGISTER_SIM_OBJECT("NSGigEInt", NSGigEInt)
2637
2638
BEGIN_DECLARE_SIM_OBJECT_PARAMS(NSGigE)

    // Link and interrupt timing
    Param<Tick> tx_delay;
    Param<Tick> rx_delay;
    Param<Tick> intr_delay;

    // Memory-system hookup
    SimObjectParam<MemoryController *> mmu;
    SimObjectParam<PhysicalMemory *> physmem;

    // Receive filtering and MAC address
    Param<bool> rx_filter;
    Param<string> hardware_address;

    // Buses the device's DMA traffic goes over and PIO timing
    SimObjectParam<Bus*> header_bus;
    SimObjectParam<Bus*> payload_bus;
    SimObjectParam<HierParams *> hier;
    Param<Tick> pio_latency;

    // DMA timing model knobs (fixed delay plus per-size factor)
    Param<bool> dma_desc_free;
    Param<bool> dma_data_free;
    Param<Tick> dma_read_delay;
    Param<Tick> dma_write_delay;
    Param<Tick> dma_read_factor;
    Param<Tick> dma_write_factor;

    // PCI placement of the device
    SimObjectParam<PciConfigAll *> configspace;
    SimObjectParam<PciConfigData *> configdata;
    SimObjectParam<Platform *> platform;
    Param<uint32_t> pci_bus;
    Param<uint32_t> pci_dev;
    Param<uint32_t> pci_func;

    // On-device packet fifo capacities (bytes)
    Param<uint32_t> tx_fifo_size;
    Param<uint32_t> rx_fifo_size;

END_DECLARE_SIM_OBJECT_PARAMS(NSGigE)
2668
BEGIN_INIT_SIM_OBJECT_PARAMS(NSGigE)

    // Descriptions and defaults for every parameter declared above;
    // entries without DFLT are mandatory in the config.
    INIT_PARAM_DFLT(tx_delay, "Transmit Delay", 1000),
    INIT_PARAM_DFLT(rx_delay, "Receive Delay", 1000),
    INIT_PARAM_DFLT(intr_delay, "Interrupt Delay in microseconds", 0),
    INIT_PARAM(mmu, "Memory Controller"),
    INIT_PARAM(physmem, "Physical Memory"),
    INIT_PARAM_DFLT(rx_filter, "Enable Receive Filter", true),
    INIT_PARAM_DFLT(hardware_address, "Ethernet Hardware Address",
                    "00:99:00:00:00:01"),
    INIT_PARAM_DFLT(header_bus, "The IO Bus to attach to for headers", NULL),
    INIT_PARAM_DFLT(payload_bus, "The IO Bus to attach to for payload", NULL),
    INIT_PARAM_DFLT(hier, "Hierarchy global variables", &defaultHierParams),
    INIT_PARAM_DFLT(pio_latency, "Programmed IO latency in bus cycles", 1),
    INIT_PARAM_DFLT(dma_desc_free, "DMA of Descriptors is free", false),
    INIT_PARAM_DFLT(dma_data_free, "DMA of Data is free", false),
    INIT_PARAM_DFLT(dma_read_delay, "fixed delay for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_delay, "fixed delay for dma writes", 0),
    INIT_PARAM_DFLT(dma_read_factor, "multiplier for dma reads", 0),
    INIT_PARAM_DFLT(dma_write_factor, "multiplier for dma writes", 0),
    INIT_PARAM(configspace, "PCI Configspace"),
    INIT_PARAM(configdata, "PCI Config data"),
    INIT_PARAM(platform, "Platform"),
    INIT_PARAM(pci_bus, "PCI bus"),
    INIT_PARAM(pci_dev, "PCI device number"),
    INIT_PARAM(pci_func, "PCI function code"),
    INIT_PARAM_DFLT(tx_fifo_size, "max size in bytes of txFifo", 131072),
    INIT_PARAM_DFLT(rx_fifo_size, "max size in bytes of rxFifo", 131072)

END_INIT_SIM_OBJECT_PARAMS(NSGigE)
2699
2700
2701 CREATE_SIM_OBJECT(NSGigE)
2702 {
2703 NSGigE::Params *params = new NSGigE::Params;
2704
2705 params->name = getInstanceName();
2706 params->mmu = mmu;
2707 params->configSpace = configspace;
2708 params->configData = configdata;
2709 params->plat = platform;
2710 params->busNum = pci_bus;
2711 params->deviceNum = pci_dev;
2712 params->functionNum = pci_func;
2713
2714 params->intr_delay = intr_delay;
2715 params->pmem = physmem;
2716 params->tx_delay = tx_delay;
2717 params->rx_delay = rx_delay;
2718 params->hier = hier;
2719 params->header_bus = header_bus;
2720 params->payload_bus = payload_bus;
2721 params->pio_latency = pio_latency;
2722 params->dma_desc_free = dma_desc_free;
2723 params->dma_data_free = dma_data_free;
2724 params->dma_read_delay = dma_read_delay;
2725 params->dma_write_delay = dma_write_delay;
2726 params->dma_read_factor = dma_read_factor;
2727 params->dma_write_factor = dma_write_factor;
2728 params->rx_filter = rx_filter;
2729 params->eaddr = hardware_address;
2730 params->tx_fifo_size = tx_fifo_size;
2731 params->rx_fifo_size = rx_fifo_size;
2732 return new NSGigE(params);
2733 }
2734
2735 REGISTER_SIM_OBJECT("NSGigE", NSGigE)